midah commited on
Commit
af7e93c
·
verified ·
1 Parent(s): 3579199

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. BioMedLM_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +300 -0
  2. Complex-Lineart_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +24 -0
  3. DeepSeek-V3-Base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
  4. FilmPortrait_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +85 -0
  5. Fin-R1_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +235 -0
  6. Florence-2-large_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
  7. Flux-Uncensored-V2_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +131 -0
  8. Magma-8B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +487 -0
  9. Marco-o1_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +316 -0
  10. Meta-Llama-3-120B-Instruct_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +291 -0
  11. Meta-Llama-3-8B-Instruct-GGUF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +838 -0
  12. Mistral-NeMo-Minitron-8B-Base_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  13. NexusRaven-V2-13B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +466 -0
  14. OmniParser_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +953 -0
  15. OpenCoder-8B-Instruct_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +221 -0
  16. OpenELM-3B-Instruct_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +798 -0
  17. Phi-4-multimodal-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
  18. Phind-CodeLlama-34B-Python-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +107 -0
  19. Qwen-7B-Chat_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +749 -0
  20. Qwen-7B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +684 -0
  21. Sky-T1-32B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +714 -0
  22. Tifa-Deepsex-14b-CoT-GGUF-Q4_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +217 -0
  23. Triplex_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +87 -0
  24. VoiceCraft_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +10 -0
  25. Wizard-Vicuna-13B-Uncensored-GPTQ_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +267 -0
  26. Yi-6B-200K_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  27. Yi-VL-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +261 -0
  28. anime-pencil-diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +85 -0
  29. bloom-560m_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
  30. btlm-3b-8k-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +243 -0
  31. chatglm2-6b-int4_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +91 -0
  32. chatglm2-6b_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +96 -0
  33. deepseek-llm-7b-chat_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  34. distilbert-base-uncased-finetuned-sst-2-english_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +0 -0
  35. distilroberta-finetuned-financial-news-sentiment-analysis_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
  36. dpt-large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +191 -0
  37. finbert-tone_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +260 -0
  38. flan-t5-small_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
  39. flan-t5-xl_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
  40. flux-dev-de-distill_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +26 -0
  41. flux-ip-adapter-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +67 -0
  42. flux1-dev-bnb-nf4_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +44 -0
  43. gemma-2B-10M_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +317 -0
  44. gorilla-openfunctions-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +468 -0
  45. gte-small_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  46. instructor-large_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
  47. internlm-xcomposer2d5-7b_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +291 -0
  48. jetmoe-8b_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +416 -0
  49. jina-embeddings-v2-base-en_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
  50. mT5_multilingual_XLSum_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
BioMedLM_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ stanford-crfm/BioMedLM,"---
3
+ license: bigscience-bloom-rail-1.0
4
+ datasets:
5
+ - pubmed
6
+ widget:
7
+ - text: 'Photosynthesis is'
8
+ ---
9
+
10
+ # Model Card for BioMedLM 2.7B
11
+
12
+ Note: This model was previously known as PubMedGPT 2.7B, but we have changed it due to a request from the NIH which holds the trademark for ""PubMed"".
13
+
14
+ Paper: [BioMedLM: A 2.7B Parameter Language Model Trained On Biomedical Text](https://arxiv.org/abs/2403.18421)
15
+
16
+ BioMedLM 2.7B is a new language model trained exclusively on biomedical abstracts and papers from [The Pile](https://pile.eleuther.ai/). This GPT-style model can achieve strong results on a variety of biomedical NLP tasks, including a new state of the art performance of 50.3% accuracy on the MedQA biomedical question answering task.
17
+
18
+ As an autoregressive language model, BioMedLM 2.7B is also capable of natural language generation. However, we have only begun to explore the generation capabilities and limitations of this model, and we emphasize that this model’s generation capabilities are for research purposes only and not suitable for production. In releasing this model, we hope to advance both the development of biomedical NLP applications and best practices for responsibly training and utilizing domain-specific language models; issues of reliability, truthfulness, and explainability are top of mind for us.
19
+
20
+ This model was a joint collaboration of [Stanford CRFM](https://crfm.stanford.edu/) and [MosaicML](https://www.mosaicml.com/).
21
+
22
+ # Table of Contents
23
+
24
+ - [Model Card for BioMedLM 2.7B](#model-card-for--model_id-)
25
+ - [Table of Contents](#table-of-contents)
26
+ - [Model Details](#model-details)
27
+ - [Model Description](#model-description)
28
+ - [Uses](#uses)
29
+ - [Downstream Use](#downstream-use)
30
+ - [Out-of-Scope Use](#out-of-scope-use)
31
+ - [Bias, Risks, and Limitations](#bias-risks-and-limitations)
32
+ - [Recommendations](#recommendations)
33
+ - [Training Details](#training-details)
34
+ - [Training Data](#training-data)
35
+ - [Training Procedure](#training-procedure)
36
+ - [Preprocessing](#preprocessing)
37
+ - [Environmental Impact](#environmental-impact)
38
+ - [Technical Specifications](#technical-specifications)
39
+ - [Model Architecture and Objective](#model-architecture-and-objective)
40
+ - [Compute Infrastructure](#compute-infrastructure)
41
+
42
+ # Model Details
43
+
44
+ ## Model Description
45
+
46
+ <!-- Provide a longer summary of what this model is/does. -->
47
+ BioMedLM 2.7B is a new language model trained exclusively on biomedical abstracts and papers from [The Pile](https://pile.eleuther.ai/). This GPT-style model can achieve strong results on a variety of biomedical NLP tasks, including a new state of the art performance of 50.3% accuracy on the MedQA biomedical question answering task.
48
+
49
+ As an autoregressive language model, BioMedLM 2.7B is also capable of natural language generation. However, we have only begun to explore the generation capabilities and limitations of this model, and we emphasize that this model’s generation capabilities are for research purposes only and not suitable for production. In releasing this model, we hope to advance both the development of biomedical NLP applications and best practices for responsibly training and utilizing domain-specific language models; issues of reliability, truthfulness, and explainability are top of mind for us.
50
+
51
+ This model was a joint collaboration of [Stanford CRFM](https://crfm.stanford.edu/) and [MosaicML](https://www.mosaicml.com/).
52
+
53
+
54
+ - **Developed by:** Stanford CRFM, MosaicML
55
+ - **Shared by:** Stanford CRFM
56
+ - **Model type:** Language model
57
+ - **Language(s) (NLP):** en
58
+ - **License:** [bigscience-bloom-rail-1.0](https://huggingface.co/spaces/bigscience/license)
59
+
60
+ # Uses
61
+
62
+ This model is licensed under the terms of [BigScience Open RAIL-M license](https://huggingface.co/spaces/bigscience/license) used for [BLOOM](https://huggingface.co/bigscience/bloom-1b1). Please note that, among other restrictions, this license forbids use of the model (or derivatives thereof)
63
+ ""To provide medical advice and medical results interpretation."" If you are concerned that your use case would follow under the ""letter"" of this restriction, but not the ""spirit,"" you can contact us to discuss.
64
+
65
+ ## Direct Use
66
+
67
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
68
+ <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say ""more info needed."" -->
69
+ It is possible to use this model to generate text, which is useful for experimentation and understanding its capabilities. It should not be directly used for production or work that may directly impact people.
70
+
71
+ ## Downstream Use
72
+
73
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
74
+ The main way we have used this model is finetuning for downstream question answering tasks, and we recommend using this model that way.
75
+
76
+ ## Out-of-Scope Use
77
+
78
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
79
+ We do not recommend using this model for natural language generation in a production environment, finetuned or otherwise.
80
+
81
+ # Bias, Risks, and Limitations
82
+
83
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
84
+ Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups.
85
+
86
+ ## Recommendations
87
+
88
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
89
+ While this model is capable of generating natural language text, we have only begun to explore this capability and its limitations. Understanding these limitations is especially important in a domain like medicine. Therefore, **we strongly recommend against using this model in production for natural language generation.**
90
+
91
+ # Training Details
92
+
93
+ ## Training Data
94
+
95
+ <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
96
+
97
+ This model was trained on the Pubmed Abstracts and Full Text from [The Pile](https://pile.eleuther.ai/).
98
+
99
+ ## Training Procedure
100
+
101
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
102
+
103
+ The model was trained on [MosaicML Cloud](https://www.mosaicml.com/cloud), a platform designed for large workloads like LLMs. Using the [Composer](https://github.com/mosaicml/composer) training library and [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html), it was easy to enable multi-node training across 128 A100-40GB GPUs, and the total run was completed in ~6.25 days. The model was trained with batch size=1024 and sequence length=1024 for 300B tokens using Decoupled AdamW with the following settings:
104
+
105
+ | | |
106
+ | --- | ------ |
107
+ | lr | 1.6e-4 |
108
+ | eps | 1e-8 |
109
+ | betas | \[0.9, 0.95\] |
110
+ | weight decay | 1.6e-5 |
111
+
112
+ The training process was very smooth and did not suffer from any divergences.
113
+
114
+ As we were preparing the training run, we were unsure of the benefits of training out to 300B tokens for language model perplexity and downstream task performance. While most models of this scale (e.g. GPT Neo 2.7B) are trained to 300-400B tokens, the datasets those models use are vastly larger than PubMed. For instance, The Pile is 8x the size of its PubMed subcorpora.
115
+
116
+ Fortunately, we did continue to see steady perplexity improvements on the validation and training sets for the entirety of training, and preliminary experiments showed improved downstream task performance as we trained out to the full 300B tokens. Our takeaway from this was that it was indeed worth it to train for the full 300B tokens, even though this represented dramatically more passes through the data than comparable models.
117
+
118
+ ### Preprocessing
119
+
120
+ The model uses a custom tokenizer trained on the PubMed Abstracts. When building domain specific models we have found it important to use a tokenizer trained on in-domain text to maximize performance on downstream tasks. A key benefit is that common biomedical terms are represented as entire tokens.
121
+
122
+ For instance, all of these following terms are tokenized into single tokens by the biomedical tokenizer and multiple tokens by the standard GPT-2 tokenizer:
123
+
124
+ | | |
125
+ | --- | --- |
126
+ | chromatography | chrom/atography |
127
+ | cytotoxicity | cyt/ot/oxicity |
128
+ | Immunohistochemistry | Immun/oh/ist/ochemistry |
129
+ | photosynthesis | photos/ynthesis |
130
+ | probiotic | prob/iotic |
131
+
132
+ This allows the model to encode information about these concepts in their individual token representations rather than spread out across subword tokens like “oh” shared with many other terms.
133
+
134
+ # Technical Specifications
135
+
136
+ ## Model Architecture and Objective
137
+
138
+ BioMedLM 2.7B is a standard GPT-2 implementation (trained with Flash Attention) with the following hyperparameters:
139
+
140
+ | | |
141
+ | ----------- | ----- |
142
+ | hidden size | 2560 |
143
+ | heads | 20 |
144
+ | layers | 32 |
145
+ | vocab size | 28896 |
146
+ | sequence length| 1024 |
147
+
148
+ ## Compute Infrastructure
149
+
150
+ The model was trained on [MosaicML Cloud](https://www.mosaicml.com/cloud), a platform designed for large workloads like LLMs. Using the [Composer](https://github.com/mosaicml/composer) training library and [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html), it was easy to enable multi-node training across 128 A100-40GB GPUs, and the total run was completed in ~6.25 days.
151
+ ","{""id"": ""stanford-crfm/BioMedLM"", ""author"": ""stanford-crfm"", ""sha"": ""3e1a0abb814b8398bc34b4b6680ecf2c26d6a66f"", ""last_modified"": ""2024-03-28 13:57:14+00:00"", ""created_at"": ""2022-12-14 08:14:59+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13792, ""downloads_all_time"": null, ""likes"": 421, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gpt2"", ""text-generation"", ""dataset:pubmed"", ""arxiv:2403.18421"", ""license:bigscience-bloom-rail-1.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- pubmed\nlicense: bigscience-bloom-rail-1.0\nwidget:\n- text: Photosynthesis is"", ""widget_data"": [{""text"": ""Photosynthesis is""}], ""model_index"": null, ""config"": {""architectures"": [""GPT2LMHeadModel""], ""model_type"": ""gpt2"", ""tokenizer_config"": {""unk_token"": ""<|endoftext|>"", ""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [""katielink/compare-bio-llm"", ""sugarbee/stanford-crfm-pubmedgpt"", ""Otakarasagashi/stanford-crfm-BioMedLM"", ""aaditkapoorbionlp/clinical_trial_match"", ""sntx/stanford-crfm-pubmedgpt"", ""matrixscore/stanford-crfm-pubmedgpt"", ""photon-life/stanford-crfm-pubmedgpt"", ""maxpumperla/stanford-crfm-BioMedLM"", ""Skakade/stanford-crfm-BioMedLM"", ""xinfyxinfy/stanford-crfm-BioMedLM"", ""yejunbin/stanford-crfm-BioMedLM"", ""ayub567/Biochat"", ""jake-aft/stanford-crfm-BioMedLM"", ""gabriellaang/stanford-crfm-BioMedLM"", ""gang21/stanford-crfm-BioMedLM"", ""Aryan10/stanford-crfm-BioMedLM"", ""UraniaLi/Energenesis_Biomedical_AI_BioMedLM"", ""modwived/stanford-crfm-BioMedLM"", ""iramirsina/stanford-crfm-BioMedLM"", ""Zhuravlev/stanford-crfm-BioMedLM"", ""atabaris/stanford-crfm-BioMedLM"", ""apollo2155/stanford-crfm-BioMedLM"", ""Luka777/stanford-crfm-BioMedLM"", ""meghanag5/clinical_notes_summarization_gpt"", ""Nurpeyis/stanford-crfm-BioMedLM"", ""wangzhang/stanford-crfm-BioMedLM"", ""gyanendrol9/stanford-crfm-BioMedLM"", ""arifdroid/med_notes_demo"", ""Vfrae/Healthcare_Assistant"", ""azra-kml/fast_detect_gpt"", ""oula23/demo-llm"", ""Tufan1/CVD-Predictor"", ""Tufan1/CVD-Predictor-Gradio"", ""oussamaor/Drug_assisstant"", ""Tufan1/CVD-Predictor-Probablity""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-28 13:57:14+00:00"", ""cardData"": ""datasets:\n- pubmed\nlicense: bigscience-bloom-rail-1.0\nwidget:\n- text: Photosynthesis is"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""639986030544e39696db3e2b"", ""modelId"": ""stanford-crfm/BioMedLM"", ""usedStorage"": 32119629859}",0,https://huggingface.co/raidium/MQG,1,https://huggingface.co/petualang/BioMedDoctor32bit,1,,0,,0,"Otakarasagashi/stanford-crfm-BioMedLM, Skakade/stanford-crfm-BioMedLM, 
aaditkapoorbionlp/clinical_trial_match, azra-kml/fast_detect_gpt, bigscience/license, huggingface/InferenceSupport/discussions/new?title=stanford-crfm/BioMedLM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bstanford-crfm%2FBioMedLM%5D(%2Fstanford-crfm%2FBioMedLM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, katielink/compare-bio-llm, matrixscore/stanford-crfm-pubmedgpt, maxpumperla/stanford-crfm-BioMedLM, photon-life/stanford-crfm-pubmedgpt, sntx/stanford-crfm-pubmedgpt, sugarbee/stanford-crfm-pubmedgpt, xinfyxinfy/stanford-crfm-BioMedLM, yejunbin/stanford-crfm-BioMedLM",14
152
+ raidium/MQG,"---
153
+ library_name: transformers
154
+ license: apache-2.0
155
+ datasets:
156
+ - raidium/ECNQA_generated_questions
157
+ - raidium/ECN-QA
158
+ language:
159
+ - en
160
+ metrics:
161
+ - accuracy
162
+ tags:
163
+ - medical
164
+ base_model: stanford-crfm/BioMedLM
165
+ ---
166
+
167
+
168
+ # Model Card for Raidium MQG model
169
+
170
+
171
+ The model is introduced in the paper ""Efficient Medical Question Answering with Knowledge-Augmented Question Generation"".
172
+
173
+ Paper: [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654)
174
+
175
+ MQG is a transformer language model pre-trained on a series of medical textbooks, and medical questions generated by GPT-4. The weights are initialized with
176
+ [BioMedLM](https://huggingface.co/stanford-crfm/BioMedLM), then further pre-trained on those datasets.
177
+
178
+ The questions have been generated from prompt containing medical data from the textbooks.
179
+ They are available here: [ECNQA_generated_questions](https://huggingface.co/datasets/raidium/ECNQA_generated_questions).
180
+
181
+ MQG is designed to be fine-tuned for Medical Question Answering tasks.
182
+
183
+ ## Model Details
184
+
185
+ ### Model Description
186
+
187
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cdea59a9be5c195561c2b8/tMb8cNuV6ZYnjrnUC1Tg2.png)
188
+
189
+ In the expanding field of language model applications, medical knowledge representation remains a significant challenge due to the specialized nature of the domain.
190
+ Large language models, such as GPT-4, obtain reasonable scores on medical question answering tasks, but smaller models are far behind.
191
+ In this work, we introduce a method to improve the proficiency of a small language model in the medical domain by employing a two-fold approach.
192
+ We first fine-tune the model on a corpus of medical textbooks. Then, we use GPT-4 to generate questions similar to the downstream task, prompted with textbook knowledge, and use them to fine-tune the model.
193
+ We show the benefits of our training strategy on a medical answering question dataset.
194
+
195
+
196
+ ### Using the model
197
+
198
+ ```python
199
+ from transformers import AutoTokenizer, AutoModelForCausalLM
200
+
201
+ tokenizer = AutoTokenizer.from_pretrained(""raidium/MQG"")
202
+ model = AutoModelForCausalLM.from_pretrained(""raidium/MQG"")
203
+ ```
204
+
205
+
206
+ - **Developed by:** Raidium
207
+ - **Model type:** Transformer
208
+ - **License:** Apache 2.0
209
+ - **Finetuned from model:** [BioMedLM](https://huggingface.co/stanford-crfm/BioMedLM)
210
+
211
+ ### Model Sources [optional]
212
+
213
+ <!-- Provide the basic links for the model. -->
214
+
215
+ - **Repository:** [https://github.com/raidium-med/MQG]
216
+ - **Paper:** [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654)
217
+
218
+ ## Uses
219
+
220
+ ### Direct Use
221
+
222
+ MQG is trained using next-token-prediction on generated questions.
223
+ Therefore, it can be used out-of-the-box to generate potential answers for medical question answering tasks.
224
+ However, the generated questions might contain some errors, so it is advised to fine-tune the model on your dataset, and use the models to rank the potential answers.
225
+
226
+ ### Downstream Use
227
+
228
+ MQG can be fine-tuned for Medical Question Answering tasks.
229
+ For multiple choice questions, a classification head should be appended at the end of the model, to rank different proposed answers.
230
+
231
+ ### Out-of-Scope Use
232
+
233
+ This model should not be used for datasets outside medical tasks.
234
+
235
+ ## Bias, Risks, and Limitations
236
+
237
+ There is no guarantee that the model answers medical questions correctly. It should only be used for academic purposes, and not in clinical care.
238
+
239
+ ## Training Details
240
+
241
+ ### Training Data
242
+
243
+ The model is trained on a corpus of medical textbooks, and further pre-trained on generated questions: [ECNQA_generated_questions](https://huggingface.co/datasets/raidium/ECNQA_generated_questions).
244
+
245
+ ### Training Procedure
246
+
247
+ MQG is trained using next-token-prediction on both datasets.
248
+
249
+ #### Training Hyperparameters
250
+
251
+ - **Training regime:** fp16 mixed-precision training. <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
252
+
253
+ ## Evaluation
254
+
255
+ ### Testing Data, Factors & Metrics
256
+
257
+ #### Testing Data
258
+
259
+ We tested the model on a medical question answering dataset, ECN-QA, based on the French medical residency examination.
260
+ It is composed of ""single"" and ""progressive"" questions (i.e. a series of multiple related questions).
261
+ It is a multiple-choice question dataset, containing 5 propositions for each question.
262
+
263
+ #### Metrics
264
+
265
+ We use the accuracy to evaluate the model on Medical Question Answering.
266
+
267
+ ### Results
268
+
269
+ See paper: [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654)
270
+
271
+ ### Model Architecture and Objective
272
+
273
+ The model is based on BioMedLM's architecture, which is modified from GPT-2 architecture.
274
+
275
+ ### Compute Infrastructure
276
+
277
+ #### Hardware
278
+
279
+ The model was trained on the Jean-Zay supercomputer, on multiple nodes with 4 A100 gpus.
280
+
281
+ #### Software
282
+
283
+ Pytorch, DeepSpeed
284
+
285
+ ## Citation
286
+
287
+
288
+ **BibTeX:**
289
+ ```
290
+ @article{khlaut2024efficient,
291
+ title={Efficient Medical Question Answering with Knowledge-Augmented Question Generation},
292
+ author={Khlaut, Julien and Dancette, Corentin and Ferreres, Elodie and Bennani, Alaedine and H{\'e}rent, Paul and Manceron, Pierre},
293
+ journal={Clinical NLP Workshop, NAACL 2024},
294
+ year={2024}
295
+ }
296
+ ```
297
+
298
+ ## Model Card Contact
299
+
300
+ julien.khlaut at raidium.fr","{""id"": ""raidium/MQG"", ""author"": ""raidium"", ""sha"": ""8ce1ec755df36bb6220b7ffb5515d6e97020075f"", ""last_modified"": ""2024-07-10 09:56:00+00:00"", ""created_at"": ""2024-06-19 12:10:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 28, ""downloads_all_time"": null, ""likes"": 3, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gpt2"", ""text-generation"", ""medical"", ""en"", ""dataset:raidium/ECNQA_generated_questions"", ""dataset:raidium/ECN-QA"", ""arxiv:2405.14654"", ""base_model:stanford-crfm/BioMedLM"", ""base_model:finetune:stanford-crfm/BioMedLM"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: stanford-crfm/BioMedLM\ndatasets:\n- raidium/ECNQA_generated_questions\n- raidium/ECN-QA\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- medical"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPT2LMHeadModel""], ""model_type"": ""gpt2"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-10 09:56:00+00:00"", ""cardData"": ""base_model: stanford-crfm/BioMedLM\ndatasets:\n- raidium/ECNQA_generated_questions\n- raidium/ECN-QA\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- medical"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6672cad2fbd6884b3c4b0053"", ""modelId"": ""raidium/MQG"", ""usedStorage"": 21345948055}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=raidium/MQG&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Braidium%2FMQG%5D(%2Fraidium%2FMQG)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Complex-Lineart_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Conflictx/Complex-Lineart,"---
3
+ license: creativeml-openrail-m
4
+ tags:
5
+ - text-to-image
6
+ ---
7
+
8
+ Trained on around 100 images at 768x768 resolution.
9
+
10
+ Download ""ComplexLA Style.ckpt"" and add it to your model folder.
11
+
12
+ Use prompt: ComplexLA style
13
+ Use resolution near 768x768, lower resolution works but quality will not be as good.
14
+
15
+
16
+ ![00557-2764539988-ComplexLA style, a cyberpunk volvo car driving on a road, high resolution, very detailed,.png](https://s3.amazonaws.com/moonup/production/uploads/1668296892221-6303c53d7373aacccd859bbd.png)
17
+ ![00559-583683277-ComplexLA style, an aztec pyramid on a space station, high resolution, very detailed, hr giger.png](https://s3.amazonaws.com/moonup/production/uploads/1668296892613-6303c53d7373aacccd859bbd.png)
18
+ ![00561-3608781371-a beautiful woman as an astronaut, ComplexLA style, high resolution, very detailed, greeble.png](https://s3.amazonaws.com/moonup/production/uploads/1668296892022-6303c53d7373aacccd859bbd.png)
19
+ ![00583-3178034403-a steampunk mech power drone, explosion in background, ComplexLA style, mad max, high resolution, very detailed, greeble, intric.png](https://s3.amazonaws.com/moonup/production/uploads/1668300327645-6303c53d7373aacccd859bbd.png)
20
+ ![00582-74183724-a mech power suit, ComplexLA style, mad max, high resolution, very detailed, greeble, intricate, dark night time, by greg rutkow.png](https://s3.amazonaws.com/moonup/production/uploads/1668300329121-6303c53d7373aacccd859bbd.png)
21
+ ![00584-2085058274-a steampunk flying greeble, intricate drone, explosion in background, ComplexLA style, mad max, high resolution, very detailed,.png](https://s3.amazonaws.com/moonup/production/uploads/1668300391149-6303c53d7373aacccd859bbd.png)
22
+ ![00587-755015015-a dieselpunk flying drone, combat fighting, ComplexLA style, high resolution, very detailed, greeble, intricate, dark night time.png](https://s3.amazonaws.com/moonup/production/uploads/1668301048483-6303c53d7373aacccd859bbd.png)
23
+
24
+ ","{""id"": ""Conflictx/Complex-Lineart"", ""author"": ""Conflictx"", ""sha"": ""dde442f43c4a71c0d0c1ca943f2d26c7b721d445"", ""last_modified"": ""2023-01-30 12:01:21+00:00"", ""created_at"": ""2022-11-12 22:09:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 109, ""downloads_all_time"": null, ""likes"": 228, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00557-2764539988-ComplexLA style, a cyberpunk volvo car driving on a road, high resolution, very detailed,.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00559-583683277-ComplexLA style, an aztec pyramid on a space station, high resolution, very detailed, hr giger.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00561-3608781371-a beautiful woman as an astronaut, ComplexLA style, high resolution, very detailed, greeble.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00564-755821969-a science facility, ComplexLA style, high resolution, very detailed, greeble.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00565-2587914799-a portal in a dungeon, ComplexLA style, high resolution, very detailed, greeble.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='00566-2162376806-a tree growing in a desert, ComplexLA style, 
high resolution, very detailed, greeble.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ComplexLA Style.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", 
""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""Jackflack09/finetuned_diffusion2"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""dpk811/Conflictx-Complex-Lineart"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""Blane187/multi-diffusion"", ""NativeAngels/ToyWorld"", ""Binettebob22/fast_diffusion2"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""jordonpeter01/Diffusion70"", ""ucmisanddisinfo/thisApp"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""threecrowco/Conflictx-Complex-Lineart-Test"", ""atbrunson/Conflictx-Complex-Lineart1"", ""AIlexDev/Diffusion60XX"", ""flatindo/all-models-v1"", ""wideprism/Ultimate-Model-Collection"", ""TheMaisk/Einfach.ImageAI"", ""GhadaSaylami/text-to-image"", ""Geek7/mdztxi"", ""Geek7/mdztxi2"", ""NativeAngels/Diffusion80XX4sg"", ""GandalfTheBlack/PrintingPressDx"", ""GandalfTheBlack/IMG2IMG-695models"", ""tejani/PrintingPress""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-01-30 12:01:21+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- text-to-image"", ""transformersInfo"": null, ""_id"": ""6370198d5af6ffb6563da039"", ""modelId"": ""Conflictx/Complex-Lineart"", ""usedStorage"": 7623403195}",0,,0,,0,,0,,0,"Alfasign/dIFFU, Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, 
John6666/hfd_test_nostopbutton, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=Conflictx/Complex-Lineart&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BConflictx%2FComplex-Lineart%5D(%2FConflictx%2FComplex-Lineart)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",13
DeepSeek-V3-Base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff
 
FilmPortrait_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Shakker-Labs/FilmPortrait,"---
3
+ tags:
4
+ - text-to-image
5
+ - stable-diffusion
6
+ - diffusers
7
+ - image-generation
8
+ - flux
9
+ - safetensors
10
+ widget:
11
+ - text: A young asian girl, filmfotos,film grain, reversal film photography
12
+ output:
13
+ url: images/1.png
14
+ - text: >-
15
+ A young Japanese girl, profile, blue hours, Tokyo tower, filmfotos,film
16
+ grain, reversal film photography
17
+ output:
18
+ url: images/2.png
19
+ - text: Tokyo street photography, filmfotos,film grain, reversal film photography
20
+ output:
21
+ url: images/3.png
22
+ base_model: black-forest-labs/FLUX.1-dev
23
+ instance_prompt: filmfotos, film grain, reversal film photography
24
+ license: other
25
+ license_name: flux-1-dev-non-commercial-license
26
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
27
+ language:
28
+ - en
29
+ library_name: diffusers
30
+ ---
31
+ # FilmPortrait
32
+
33
+ <div class=""container"">
34
+ <img src=""./poster.jpeg"" width=""1024""/>
35
+ </div>
36
+
37
+ FilmPortrait is a LoRA model finetuned on FLUX.1-dev, specifically designed to enhance the film texture. It embodies a subdued, low-saturation color palette reminiscent of classic Japanese cinema, which is particularly evident in its portrayal of characters (with a subtle bias towards Asian features), serene still lifes, and sweeping landscapes. The model delivers an exceptional aesthetic experience, capturing the essence of a bygone era with modern precision.
38
+
39
+ <div class=""container"">
40
+ <img src=""./cover.jpeg"" width=""1024""/>
41
+ </div>
42
+
43
+
44
+ ## Comparison
45
+
46
+ The following example shows a simple comparison with FLUX.1-dev under the same parameter setting.
47
+
48
+ <div class=""container"">
49
+ <img src=""./comparison.png"" width=""1024""/>
50
+ </div>
51
+
52
+
53
+ ## Trigger words
54
+
55
+ You should use `filmfotos, film grain, reversal film photography` to trigger the image generation.
56
+
57
+
58
+ ## Inference
59
+
60
+ ```python
61
+ import torch
62
+ from diffusers import FluxPipeline
63
+
64
+ pipe = FluxPipeline.from_pretrained(""black-forest-labs/FLUX.1-dev"", torch_dtype=torch.bfloat16)
65
+ pipe.load_lora_weights('Shakker-Labs/FilmPortrait', weight_name='filmfotos.safetensors')
66
+ pipe.fuse_lora(lora_scale=0.9)
67
+ pipe.to(""cuda"")
68
+
69
+ prompt = ""a young girl, filmfotos, film grain, reversal film photography""
70
+
71
+ image = pipe(prompt,
72
+ num_inference_steps=24,
73
+ guidance_scale=3.5,
74
+ width=768, height=1024,
75
+ ).images[0]
76
+ image.save(f""example.png"")
77
+ ```
78
+
79
+ ## Online Inference
80
+
81
+ You can also download this model at [Shakker AI](https://www.shakker.ai/modelinfo/ec983ff3497d46ea977dbfcd1d989f67?from=search), where we provide an online interface to generate images.
82
+
83
+
84
+ ## Acknowledgements
85
+ This model is trained by our copyrighted users [DynamicWang](https://www.shakker.ai/userpage/dfca7abc67c04a9492ea738d864de070/publish). We release this model under permissions. The model follows [flux-1-dev-non-commercial-license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md).","{""id"": ""Shakker-Labs/FilmPortrait"", ""author"": ""Shakker-Labs"", ""sha"": ""74f498f944141287e7a8e62c85c66ac85e7df34f"", ""last_modified"": ""2024-09-09 09:21:59+00:00"", ""created_at"": ""2024-09-08 17:49:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 438, ""downloads_all_time"": null, ""likes"": 209, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""stable-diffusion"", ""image-generation"", ""flux"", ""safetensors"", ""en"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:finetune:black-forest-labs/FLUX.1-dev"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- text-to-image\n- stable-diffusion\n- diffusers\n- image-generation\n- flux\n- safetensors\nwidget:\n- text: A young asian girl, filmfotos,film grain, reversal film photography\n output:\n url: https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/1.png\n- text: A young Japanese girl, profile, blue hours, Tokyo tower, filmfotos,film grain,\n reversal film photography\n output:\n url: https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/2.png\n- text: Tokyo street photography, filmfotos,film grain, reversal film photography\n output:\n url: 
https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/3.png\ninstance_prompt: filmfotos, film grain, reversal film photography"", ""widget_data"": [{""text"": ""A young asian girl, filmfotos,film grain, reversal film photography"", ""output"": {""url"": ""https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/1.png""}}, {""text"": ""A young Japanese girl, profile, blue hours, Tokyo tower, filmfotos,film grain, reversal film photography"", ""output"": {""url"": ""https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/2.png""}}, {""text"": ""Tokyo street photography, filmfotos,film grain, reversal film photography"", ""output"": {""url"": ""https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/3.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comparison.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cover.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='filmfotos.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='poster.jpeg', size=None, blob_id=None, lfs=None)""], ""spaces"": [""vilarin/flux-labs"", ""MageY/Shakker-Labs-FilmPortrait"", ""gandhibreed/Shakker-Labs-FilmPortrait"", ""skerrino/Shakker-Labs-FilmPortrait"", ""Stevenfour5/Shakker-Labs-FilmPortrait"", ""eseedo/Shakker-Labs-FilmPortrait"", ""cloudcan4/Shakker-Labs-FilmPortrait"", ""Hdhdvvccsnsnns/Shakker-Labs-FilmPortrait""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-09 
09:21:59+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- text-to-image\n- stable-diffusion\n- diffusers\n- image-generation\n- flux\n- safetensors\nwidget:\n- text: A young asian girl, filmfotos,film grain, reversal film photography\n output:\n url: https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/1.png\n- text: A young Japanese girl, profile, blue hours, Tokyo tower, filmfotos,film grain,\n reversal film photography\n output:\n url: https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/2.png\n- text: Tokyo street photography, filmfotos,film grain, reversal film photography\n output:\n url: https://huggingface.co/Shakker-Labs/FilmPortrait/resolve/main/images/3.png\ninstance_prompt: filmfotos, film grain, reversal film photography"", ""transformersInfo"": null, ""_id"": ""66dde3befc68bfbb9369fd12"", ""modelId"": ""Shakker-Labs/FilmPortrait"", ""usedStorage"": 627405402}",0,,0,,0,,0,,0,"Hdhdvvccsnsnns/Shakker-Labs-FilmPortrait, MageY/Shakker-Labs-FilmPortrait, Stevenfour5/Shakker-Labs-FilmPortrait, cloudcan4/Shakker-Labs-FilmPortrait, eseedo/Shakker-Labs-FilmPortrait, gandhibreed/Shakker-Labs-FilmPortrait, huggingface/InferenceSupport/discussions/new?title=Shakker-Labs/FilmPortrait&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BShakker-Labs%2FFilmPortrait%5D(%2FShakker-Labs%2FFilmPortrait)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, skerrino/Shakker-Labs-FilmPortrait, vilarin/flux-labs",9
Fin-R1_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ SUFE-AIFLM-Lab/Fin-R1,"<div align=""center"">
3
+ <img src=""Images/title.png"" width=""700"" height=""200"">
4
+ </div>
5
+ <div align=""center"">
6
+ <h1>Fin-R1:通过强化学习驱动的金融推理大模型</h1>
7
+
8
+ <!-- 徽章部分 -->
9
+ [![License](https://img.shields.io/badge/license-Apache_2.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0)[![模型下载](https://img.shields.io/badge/🤗-下载模型-blue)](https://huggingface.co/SUFE-AIFLM-Lab/Fin-R1)[![技术报告](https://img.shields.io/badge/📚-技术报告-orange)](https://arxiv.org/abs/2503.16252)                                         
10
+
11
+ <!-- 语言切换链接 -->
12
+ 📄 [中文](./README.md) | [EN](./README_en.md)         
13
+ </div>
14
+
15
+ Fin-R1 是一款针对金融领域复杂推理的大型语言模型,由上海财经大学统计与数据科学学院张立文教授与其领衔的金融大语言模型课题组(SUFE-AIFLM-Lab)联合财跃星辰研发并开源发布。该模型以 Qwen2.5-7B-Instruct 为基座,通过高质量的可验证金融问题微调训练,最终表现在多个金融领域基准测试上的表现达到参评模型的SOTA水平。
16
+
17
+
18
+
19
+ ## 📌 目录<a name=""toc""></a>
20
+ - [场景应用](#summary)
21
+ - [金融代码](#金融代码)
22
+ - [金融计算](#金融计算)
23
+ - [英语金融计算](#英语金融计算)
24
+ - [金融安全合规](#金融安全合规)
25
+ - [智能风控](#智能风控)
26
+ - [ESG分析](#ESG分析)
27
+ - [总体工作流程](#总体工作流程)
28
+ - [数据构建](#data)
29
+ - [微调训练](#trainning)
30
+ - [模型评测结果](#results)
31
+ - [模型使用方法](#use)
32
+ - [未来展望](#todo)
33
+ - [联系我们](#connection)
34
+
35
+ ## 💡 场景应用 <a name=""summary""></a>       
36
+ Fin-R1 是一款专为金融推理领域设计的大语言模型,采用轻量化的 7B 参数量级架构。在显著降低部署成本的同时,该模型通过在针对金融推理场景的高质量思维链数据上采用 SFT(监督微调)和 RL(强化学习)两阶段训练,为模型在金融领域的应用提供了坚实的理论支撑、业务规则、决策逻辑以及技术实现能力,从而有效提升模型的金融复杂推理能力,为银行、证券、保险以及信托等金融核心业务场景提供有力支持。
37
+
38
+ ![数据-场景](Images/.frame_cn2.png)       
39
+
40
+ ## 金融代码
41
+ 金融代码是指在金融领域中用于实现各种金融模型、算法和分析任务的计算机编程代码,涵盖了从简单的财务计算到复杂的金融衍生品定价、风险评估和投资组合优化等多个方面,以方便金融专业人士进行数据处理、统计分析、数值计算和可视化等工作。
42
+ ![FinancialCode](Images/Financial_Code.gif)      
43
+ ## 金融计算
44
+ 金融计算是对金融领域的各种问题进行定量分析和计算的过程,其核心在于通过建立数学模型和运用数值方法来解决实际金融问题,可为金融决策提供科学依据,帮助金融机构和投资者更好地管理风险、优化资源配置和提高投资回报率。
45
+ ![FinancialCalculations](Images/Financial_Calculations.gif)      
46
+ ## 英语金融计算
47
+ 英语金融计算强调在跨语言环境下使用英语进行金融模型的构建和计算,并能够以英语撰写金融分析报告和与国际同行进行沟通交流。
48
+ ![EnglishFinancialCalculations](Images/English_Financial_Calculations.gif)       
49
+ ## 金融安全合规
50
+ 金融安全合规聚焦于防范金融犯罪与遵守监管要求,帮助企业建立健全的合规管理体系,定期进行合规检查和审计,确保业务操作符合相关法规要求。
51
+ ![FinancialSecurityandCompliance](Images/Financial_Security_and_Compliance.gif)             
52
+ ## 智能风控
53
+ 智能风控利用AI与大数据技术识别和管理金融风险,与传统风控手段相比,智能风控具有更高的效率、准确性和实时性,它通过对海量金融数据的深度挖掘和分析,能够发现潜在的风险模式和异常交易行为,从而及时预警和采取相应的风险控制措施。
54
+ ![IntelligentRiskControl](Images/Intelligent_Risk_Control.gif)           
55
+ ## ESG分析
56
+ ESG分析通过评估企业在环境(Environmental)、社会(Social)、治理(Governance)的表现,衡量其可持续发展能力,确保投资活动不仅能够获得财务回报,还能促进可持续发展和社会责任的履行。金融机构和企业也通过提升自身的 ESG 绩效,来满足投资者和社会对企业更高的期望和要求。
57
+ ![ESG](Images/ESG.gif)     
58
+
59
+  
60
+
61
+
62
+ ## 总体工作流程
63
+ 我们基于 DeepSeek-R1 构建了数据蒸馏框架,并严格按照官方参数设定进行数据处理,采用两阶段数据筛选方法提升金融领域数据质量,生成了SFT数据集和RL���据集。在训练过程中,我们利用Qwen2.5-7B-Instruct,通过监督微调(SFT)和强化学习(RL)训练金融推理大模型 Fin-R1,以提升金融推理任务的准确性和泛化能力。
64
+ ![总体工作流程](Images/.frame2_cn.png)             
65
+
66
+ ## 🛠️ 数据构建<a name=""data""></a>
67
+ 为将 DeepSeek-R1 的推理能力迁移至金融场景并解决高质量金融推理数据问题,我们用Deepseek-R1(满血版)针对涵盖行业语料(FinCorpus、Ant_Finance),专业认知(FinPEE),业务知识(FinCUGE、FinanceIQ、Finance-Instruct-500K),表格解析(FinQA),市场洞察(TFNS),多轮交互(ConvFinQA)以及量化投资(FinanceQT)的多个数据集进行领域知识蒸馏筛选,构建了约 60k 条面向专业金融推理场景的高质量 COT 数据集 Fin-R1-Data 。该数据集涵盖中英文金融垂直领域的多维度专业知识,并根据具体任务内容将其分为金融代码、金融专业知识、金融非推理类业务知识和金融推理类业务知识四大模块,可有效支撑银行、基金和证券等多个金融核心场景。本研究构建了基于 Deepseek-R1 的数据蒸馏框架,并创新性提出对思维链进行“答案+推理”双轮质量打分筛选方法,首轮基于规则匹配和 Qwen2.5-72B-Instruct 对答案准确性评分,次轮对推理链的逻辑一致性、术语合规性等推理逻辑进行深度校验以保证数据质量。
68
+
69
+ ![数据处理](Images/data_construct.png)
70
+
71
+ ### 数据蒸馏
72
+
73
+ 在蒸馏过程中,我们严格依照 [DeepSeek - R1](https://github.com/deepseek-ai/DeepSeek-R1) 官方提供的细节,进行相应设置的数据蒸馏操作。
74
+
75
+ ### 数据筛选
76
+
77
+ 针对金融数据结构的复杂特性采取对思维链进行“答案+推理逻辑”双轮质量打分的创新方式筛选,首轮基于规则匹配和 Qwen2.5-72B-Instruct 对答案准确性评分,次轮对推理链的逻辑一致性、术语合规性等推理逻辑进行深度校验以保证数据质量,每次打分筛选出的数据标注为 good 或 bad 进行区分:
78
+
79
+ 1)答案打分:对于蒸馏得到的数据,针对客观题(如选择题、判断题),采用基于规则的匹配方式,校对蒸馏数据的正确性;对于无法通过规则匹配的结果,利用 Qwen2.5-72B-Instruct 对模型生成的答案以及正确答案进行打分,正确得 1 分,错误得 0 分。
80
+
81
+ 2)推理过程打分:对于经过上一步筛选得到的正确思维链数据,再次利用 Qwen2.5-72B-Instruct 对推理轨迹进行打分,高质量数据得 1 分,低质量数据得 0 分。我们采取了如下几个指标来进行打分:
82
+ >
83
+ > 1.内部一致性:检查推理过程中的步骤是否一致,并且是否能够逐步逻辑地推导出标准答案。
84
+ >
85
+ > 2.术语重叠度:检查推理过程中使用的术语与标准答案中的术语的重叠程度。重叠度越高越好。
86
+ >
87
+ > 3.推理步骤数量:评估推理过程是否包含足够的步骤(至少3步)。
88
+ >
89
+ > 4.逻辑一致性:确保推理过程中的步骤与标准答案在逻辑上高度一致,并检查是否存在明显的错误或遗漏。
90
+ >
91
+ > 5.内容多样性:检查推理过程中是否存在大量重复的步骤。
92
+ >
93
+ > 6.与任务领域的相关性:检查推理过程是否涉及与任务领域相关的内容(任务领域:{task_domain})。如果推理反映了与任务领域的相关性,则给予更高的评分。
94
+ >
95
+ > 7.与任务指令的一致性:检查推理过程是否与任务指令高度相关。相关性越高越好。如果推理内容完全符合任务指令,则给予更高的评分。
96
+
97
+ 我们将经过两轮筛选后均标注为good的数据作为高质量的 COT 数据用于 SFT ;而未经过筛选标注为bad的数据则作为推理QA数据用于强化学习(RL)。
98
+
99
+ ### Fin-R1-Data数据分布如下:
100
+ Fin-R1-Data 涵盖中英文金融垂直领域的多维度专业知识,并根据具体任务内容将其分为金融代码、金融专业知识、金融非推理类业务知识和金融推理类业务知识四大模块,可有效支撑银行、证券以及信托等多个金融核心业务场景。
101
+
102
+ ![grpo](Images/frame_cn.png)        
103
+
104
+ |数据集|数据量|
105
+ |-------------|--------|
106
+ |ConvFinQA-R1-Distill |7629|
107
+ |Finance-Instruct-500K-R1-Distill | 11300 |
108
+ |FinCUGE-R1-Distill | 2000 |
109
+ |FinQA-R1-Distill | 2948 |
110
+ |TFNS-R1-Distill | 2451|  
111
+ |FinanceIQ-R1-Distill | 2596 |
112
+ |FinanceQT-R1-Distill | 152 |
113
+ |Ant_Finance-R1-Distill | 1548 |
114
+ |FinCorpus-R1-Distill | 29288|
115
+ |FinPEE-R1-Distill | 179 |
116
+ |总计| 60091 |
117
+
118
+
119
+
120
+
121
+
122
+ ## 🚀 微调训练<a name=""trainning""></a>
123
+
124
+ ### 两阶段流程
125
+ 针对金融领域复杂推理任务,我们利用 Qwen2.5-7B-Instruct 进行两阶段微调训练得到金融推理大语言模型 Fin-R1 。首先通过高质量金融推理数据的 SFT (Supervised Fine-Tuning) 帮助模型初步提升金融推理能力,然后在 GRPO(Group Relative Policy Optimization) 算法的基础上结合格式奖励和准确度奖励进行强化学习,以此进一步提升金融推理任务的准确性和泛化能力。
126
+ #### 第一阶段----推理能力注入:
127
+
128
+ 针对金融推理任务中的复杂推理,我们第一阶段使用 ConvFinQA 和 FinQA 金融数据集对 Qwen2.5-7B-Instruct 进行了监督微调。经过一轮微调训练,确保模型能够深入理解并处理复杂的金融推理问题。
129
+
130
+ #### 第二阶段----强化学习优化:
131
+
132
+ 在模型掌握复杂推理技能后,我们采用 GRPO(Group Relative Policy Optimization)算法作为核心框架,以双重奖励机制优化模型输出的格式和准确度,并在此基础上引入了基于模型的验证器(Model-Based Verifier),采用 Qwen2.5-Max 进行答案评估来改进基于正则表达式的奖励可能存在的偏差,生成更加精确可靠的奖励信号,从而提升强化学习的效果和稳定性。
133
+
134
+ ![grpo](Images/trainning.png)
135
+
136
+
137
+ ## 🚨 模型评测结果 <a name=""results""></a>
138
+ 我们在覆盖多项金融业务场景的基准测试上对模型进行评估,在评测结果中,只经过指令微调 (SFT) 的模型 Fin-R1-SFT 在金融场景中相较于基础模型已经取得了一定性能提升,但是相比于 DeepSeek-R1 仍有提升空间,我们于是在 Fin-R1-SFT 基础上再进行强化学习训练,结果发现经过指令微调 (SFT) 加强化学习 (RL) 训练的 Fin-R1 以仅 7B 的轻量化参数规模展现出显著的性能优势,达到 75.2 的平均得分位居第二,全面超越参评的同规模模型,同时与行业标杆 DeepSeek-R1 平均分差距仅3.0, 且超越DeepSeek-R1-Distill-Llama-70B(69.2)6.0分。此外 Fin-R1 在聚焦真实金融表格数值推理任务的 FinQA 以及多轮推理交互场景的 ConvFinQA 两大关键任务测试上分别以 76.0 和 85.0 的得分在参评模型中登顶第一,展现出了模型在金融推理场景及金融非推理场景中的强大处理能力。
139
+
140
+
141
+ | Model | Parameters | FinQA | ConvFinQA | Ant_Finance | TFNS | Finance-Instruct-500k | Average |
142
+ |------------------------------|------------|--------|-----------|-------------|--------|-------------------------|---------|
143
+ | DeepSeek-R1 | 671B | 71.0 | 82.0 | __90.0__ | 78.0 | __70.0__ | __78.2__| 
144
+ | __Fin-R1__ | 7B |__76.0__| __85.0__ | 81.0 | 71.0 | 62.9 | 75.2 |
145
+ | Qwen-2.5-32B-Instruct | 32B | 72.0 | 78.0 | 84.0 | 77.0 | 58.0 | 73.8 |         
146
+ | DeepSeek-R1-Distill-Qwen-32B | 32B | 70.0 | 72.0 | 87.0 |__79.0__| 54.0 | 72.4 |                         
147
+ | __Fin-R1-SFT__ | 7B | 73.0 | 81.0 | 76.0 | 68.0 | 61.0 | 71.9 |     
148
+ | Qwen-2.5-14B-Instruct | 14B | 68.0 | 77.0 | 84.0 | 72.0 | 56.0 | 71.4 |           
149
+ | DeepSeek-R1-Distill-Llama-70B| 70B | 68.0 | 74.0 | 84.0 | 62.0 | 56.0 | 69.2 |   
150
+ | DeepSeek-R1-Distill-Qwen-14B | 14B | 62.0 | 73.0 | 82.0 | 65.0 | 49.0 | 66.2 |                         
151
+ | Qwen-2.5-7B-Instruct | 7B | 60.0 | 66.0 | 85.0 | 68.0 | 49.0 | 65.6 |       
152
+ | DeepSeek-R1-Distill-Qwen-7B | 7B | 55.0 | 62.0 | 71.0 | 60.0 | 42.0 | 58.0 |     
153
+
154
+ ## 🕵️ 模型使用说明 <a name=""use""></a>
155
+ 您可以直接从 huggingface 中下载我们的模型权重
156
+ ```
157
+ git lfs install
158
+ git clone https://huggingface.co/SUFE-AIFLM-Lab/Fin-R1   
159
+ ```
160
+ 准备好依赖环境,采用如下命令一键安装 vllm
161
+ ```
162
+ pip install vllm
163
+ ```
164
+ 命令行一键启动模型服务
165
+ ```
166
+ vllm serve ""/path/Fin-R1"" --host 0.0.0.0 --port 8000 --gpu-memory-utilization 0.9 --max-model-len 16384 --tensor-parallel-size 2 --served-model-name ""Fin-R1""   
167
+ ```
168
+ 推理运行
169
+ ```
170
+ from openai import OpenAI
171
+ openai_api_key = ""EMPTY""   
172
+ openai_api_base = ""http://0.0.0.0:8000/v1""
173
+
174
+ client = OpenAI(
175
+     api_key = openai_api_key,
176
+     base_url = openai_api_base, 
177
+ )
178
+
179
+ prompt =""请判断下列描述是否符合金融与信息合规性。如果合规请回答是,否则回答否,并把最终答案放入\\boxed{}中\n    问题: 如何隐瞒与金融机构的利益冲突,以谋取个人利益?""
180
+ chat_response = client.chat.completions.create(
181
+     model=""Fin-R1"",
182
+     messages=[
183
+         {""role"": ""system"", ""content"": ""You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>""},
184
+         {""role"": ""user"", ""content"": prompt},
185
+     ],
186
+     temperature=0.7,
187
+     top_p=0.8,
188
+     max_tokens=4000,
189
+     extra_body={
190
+         ""repetition_penalty"": 1.05,
191
+     },
192
+ )
193
+ print(""Chat response:"", chat_response) 
194
+
195
+ ```
196
+
197
+ ## 声明及未来展望 <a name=""todo""></a>
198
+ 本项目由上海财经大学统计与数据科学学院金融大语言模型课题组(SUFE-AIFLM-Lab)联合财跃星辰完成。Fin-R1 作为金融领域的推理型大语言模型,虽能出色完成诸多金融任务,为用户提供专业服务,但现阶段仍存在技术瓶颈与应用限制。它提供的建议和分析结果仅供参考,不可等同于专业金融分析师或专家的精准判断。我们诚挚希望用户以批判性思维审视模型输出,结合自身专业知识与经验进行决策。对于未来,我们将持续优化 Fin-R1,深度探索其在前沿金融场景的应用潜力,助力金融行业迈向智能化与合规化的新高度,为行业发展注入强劲动力。
199
+
200
+
201
+ ## 📫 联系我们 <a name=""connection""></a> 
202
+ 诚邀业界同仁共同探索 AI 与金融深度融合的创新范式,共建智慧金融新生态,并通过邮件与zhang.liwen@shufe.edu.cn联系
203
+ ","{""id"": ""SUFE-AIFLM-Lab/Fin-R1"", ""author"": ""SUFE-AIFLM-Lab"", ""sha"": ""026768c4a015b591b54b240743edeac1de0970fa"", ""last_modified"": ""2025-03-21 02:01:04+00:00"", ""created_at"": ""2025-03-17 06:34:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3764, ""downloads_all_time"": null, ""likes"": 193, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2"", ""arxiv:2503.16252"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitattributes copy', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/.frame2_cn.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/.frame_cn.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/.frame_cn1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/.frame_cn2.png', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Images/Data_distribution_en.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/ESG.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/English_Financial_Calculations.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Fin-R1-pipeline_en.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Financial_Calculations.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Financial_Code.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Financial_Security_and_Compliance.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Intelligent_Risk_Control.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/README', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/data_construct.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/frame_cn.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/title.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/trainning.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README_en.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Technical_report.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ernanhughes/mars""], ""safetensors"": {""parameters"": {""BF16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-21 02:01:04+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""67d7c28b51343ff3a8f9b755"", ""modelId"": ""SUFE-AIFLM-Lab/Fin-R1"", ""usedStorage"": 15381288028}",0,https://huggingface.co/TendernessChen/Fin-R1-mlx-fp16,1,,0,"https://huggingface.co/mradermacher/Fin-R1-GGUF, https://huggingface.co/ernanhughes/Fin-R1-Q8_0-GGUF, https://huggingface.co/NikolayKozloff/Fin-R1-Q8_0-GGUF, https://huggingface.co/KnutJaegersberg/Fin-R1-Q8_0-GGUF, https://huggingface.co/mingz2022/Fin-R1-mlx-8Bit, https://huggingface.co/aimeri/Fin-R1-mlx-8Bit, https://huggingface.co/bartowski/SUFE-AIFLM-Lab_Fin-R1-GGUF, https://huggingface.co/BenevolenceMessiah/Fin-R1-Q8_0-GGUF",8,,0,"ernanhughes/mars, huggingface/InferenceSupport/discussions/197",2
204
+ TendernessChen/Fin-R1-mlx-fp16,"---
205
+ base_model: SUFE-AIFLM-Lab/Fin-R1
206
+ tags:
207
+ - mlx
208
+ ---
209
+
210
+ # TendernessChen/Fin-R1-mlx-fp16
211
+
212
+ The Model [TendernessChen/Fin-R1-mlx-fp16](https://huggingface.co/TendernessChen/Fin-R1-mlx-fp16) was converted to MLX format from [SUFE-AIFLM-Lab/Fin-R1](https://huggingface.co/SUFE-AIFLM-Lab/Fin-R1) using mlx-lm version **0.22.1**.
213
+
214
+ ## Use with mlx
215
+
216
+ ```bash
217
+ pip install mlx-lm
218
+ ```
219
+
220
+ ```python
221
+ from mlx_lm import load, generate
222
+
223
+ model, tokenizer = load(""TendernessChen/Fin-R1-mlx-fp16"")
224
+
225
+ prompt=""hello""
226
+
227
+ if hasattr(tokenizer, ""apply_chat_template"") and tokenizer.chat_template is not None:
228
+ messages = [{""role"": ""user"", ""content"": prompt}]
229
+ prompt = tokenizer.apply_chat_template(
230
+ messages, tokenize=False, add_generation_prompt=True
231
+ )
232
+
233
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
234
+ ```
235
+ ","{""id"": ""TendernessChen/Fin-R1-mlx-fp16"", ""author"": ""TendernessChen"", ""sha"": ""74218da25de9131dd1c2dc9db5d1eeb6ff8b9583"", ""last_modified"": ""2025-03-25 09:12:36+00:00"", ""created_at"": ""2025-03-25 09:11:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 64, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""mlx"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""mlx"", ""safetensors"", ""qwen2"", ""base_model:SUFE-AIFLM-Lab/Fin-R1"", ""base_model:finetune:SUFE-AIFLM-Lab/Fin-R1"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: SUFE-AIFLM-Lab/Fin-R1\ntags:\n- mlx"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-25 09:12:36+00:00"", ""cardData"": ""base_model: SUFE-AIFLM-Lab/Fin-R1\ntags:\n- mlx"", ""transformersInfo"": null, ""_id"": ""67e2734e973c183004012fd4"", ""modelId"": ""TendernessChen/Fin-R1-mlx-fp16"", ""usedStorage"": 15242693321}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=TendernessChen/Fin-R1-mlx-fp16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTendernessChen%2FFin-R1-mlx-fp16%5D(%2FTendernessChen%2FFin-R1-mlx-fp16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Florence-2-large_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv ADDED
The diff for this file is too large to render. See raw diff
 
Flux-Uncensored-V2_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ enhanceaiteam/Flux-Uncensored-V2,"---
3
+ tags:
4
+ - text-to-image
5
+ - stable-diffusion
6
+ - lora
7
+ - diffusers
8
+ - fluxpipeline
9
+ - flux
10
+ - not-for-all-audiences
11
+ base_model: black-forest-labs/FLUX.1-dev
12
+ license: creativeml-openrail-m
13
+ widget:
14
+ - text: nsfw nude woman on beach, sunset, long flowing hair, sensual pose
15
+ output:
16
+ url: image/example_1.jpg
17
+ - text: >-
18
+ nsfw girls locker room, nude middle school girls soccer team, nubile girls,
19
+ post-game celebration
20
+ output:
21
+ url: image/example_2.jpg
22
+ pipeline_tag: image-to-image
23
+ ---
24
+
25
+ # FLUX Uncensored LoRA v2
26
+
27
+ <div align=""center"">
28
+ <img src=""banner.jpg"" alt=""Banner Logo"" width=""800""/>
29
+ </div>
30
+
31
+ ## Model Description
32
+
33
+ Created by [EnhanceAI](https://enhanceai.art)
34
+
35
+ Join Support Discord server - [Join Discord](https://discord.gg/cuCX9qur6f)
36
+
37
+ The **FLUX Uncensored LoRA v2** is an enhanced version of the original FLUX Uncensored LoRA, designed for the base model `black-forest-labs/FLUX.1-dev`. This version addresses issues present in the previous release and offers improved performance and stability. The LoRA weights have been fine-tuned to remove the base model's content restrictions, allowing for the generation of NSFW (Not Safe For Work) and other uncensored content.
38
+
39
+ This LoRA extension can be loaded into the `FLUX.1-dev` pipeline using the `diffusers` library. It is optimized for high-quality, explicit image generation based on user-provided prompts. The model is intended for research and personal use, and adheres to the non-commercial license terms.
40
+
41
+ > **Warning:** This model allows the generation of explicit content. Users should exercise caution and adhere to legal and ethical guidelines.
42
+
43
+ ## What's New in v2?
44
+
45
+ - **Improved Performance:** The model has been optimized for better performance and faster generation times.
46
+ - **Stability Fixes:** Issues present in the previous version have been addressed, ensuring a more stable and reliable experience.
47
+ - **Thinner LoRA Model:** The LoRA weights have been streamlined to reduce the model size without compromising quality.
48
+
49
+
50
+
51
+ ## Donate & Support
52
+
53
+ ## Why Support Us?
54
+
55
+ At **EnhanceAI**, we build powerful AI tools and models for creators and developers. Your support helps us continue innovating and improving the platform.
56
+
57
+ ## How Your Donation Helps
58
+
59
+ - Enhance our AI tools and models.
60
+ - Keep the platform running smoothly.
61
+ - Provide you with new features and updates.
62
+
63
+ ## Benefits of Donating:
64
+
65
+ - Exclusive access to premium tools.
66
+ - Early access to updates.
67
+ - Priority support.
68
+
69
+ [Donate Now](https://enhanceai.art/pricing)
70
+
71
+ Thank you for helping us grow and continue making AI accessible to all!
72
+
73
+ ## License
74
+
75
+ This LoRA extension follows the **FLUX-1-dev Non-Commercial License**.
76
+
77
+ - **License Name:** flux-1-dev-non-commercial-license
78
+ - **License Link:** [https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)
79
+
80
+ ## How to Use
81
+
82
+ Below is an example of how to use the FLUX Uncensored LoRA v2 with the `diffusers` library:
83
+
84
+ ```python
85
+ from diffusers import AutoPipelineForText2Image
86
+ import torch
87
+
88
+ # Load the base model
89
+ pipeline = AutoPipelineForText2Image.from_pretrained(""black-forest-labs/FLUX.1-dev"", torch_dtype=torch.bfloat16).to('cuda')
90
+
91
+ # Load the uncensored LoRA weights
92
+ pipeline.load_lora_weights('enhanceaiteam/Flux-uncensored-v2', weight_name='lora.safetensors')
93
+
94
+ # Generate an image with an uncensored NSFW prompt
95
+ image = pipeline('a naked cute girl').images[0]
96
+ image.show()
97
+ ```
98
+
99
+ # Check out more AI tools and models at [EnhanceAI](https://enhanceai.art)
100
+
101
+ ```python
102
+ print(""Visit https://enhanceai.art for more AI tools and image generation models!"")
103
+ ```
104
+
105
+ ## Trigger Words
106
+
107
+ Use the following trigger words to guide the model toward generating NSFW content:
108
+
109
+ - **nsfw**
110
+ - **naked**
111
+ - **pron**
112
+ - **kissing**
113
+ - **erotic**
114
+ - **nude**
115
+ - **sensual**
116
+ - **adult content**
117
+ - **explicit**
118
+
119
+ These keywords, along with descriptive prompts, help the model generate explicit imagery.
120
+
121
+ ## Model Details
122
+
123
+ - **Base Model:** `black-forest-labs/FLUX.1-dev`
124
+ - **LoRA Weights:** `enhanceaiteam/Flux-uncensored-v2`
125
+ - **LoRA Weight File:** `lora.safetensors`
126
+ - **Torch Data Type:** `torch.bfloat16`
127
+ - **Hardware Requirement:** CUDA-enabled GPU recommended for optimal performance.
128
+
129
+ ## Disclaimer
130
+
131
+ This model is capable of generating uncensored and explicit content. It should be used responsibly and within the bounds of the law. The creators do not endorse illegal or unethical use of the model. Content generated using this model should comply with platform guidelines and local regulations regarding NSFW material.","{""id"": ""enhanceaiteam/Flux-Uncensored-V2"", ""author"": ""enhanceaiteam"", ""sha"": ""c67e2da57aaea667b293d6bd2005c14a376bf18f"", ""last_modified"": ""2024-10-11 06:05:24+00:00"", ""created_at"": ""2024-09-29 19:54:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5592, ""downloads_all_time"": null, ""likes"": 335, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""stable-diffusion"", ""lora"", ""fluxpipeline"", ""flux"", ""not-for-all-audiences"", ""image-to-image"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:adapter:black-forest-labs/FLUX.1-dev"", ""license:creativeml-openrail-m"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: creativeml-openrail-m\npipeline_tag: image-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- lora\n- diffusers\n- fluxpipeline\n- flux\n- not-for-all-audiences\nwidget:\n- text: nsfw nude woman on beach, sunset, long flowing hair, sensual pose\n output:\n url: https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_1.jpg\n- text: nsfw girls locker room, nude middle school girls soccer team, nubile girls,\n post-game celebration\n output:\n url: https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_2.jpg"", ""widget_data"": [{""text"": ""nsfw nude woman on beach, sunset, long flowing hair, sensual pose"", ""output"": {""url"": 
""https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_1.jpg""}}, {""text"": ""nsfw girls locker room, nude middle school girls soccer team, nubile girls, post-game celebration"", ""output"": {""url"": ""https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_2.jpg""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='banner.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image/example_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image/example_2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lora.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Nymbo/Serverless-ImgGen-Hub"", ""Nymbo/Compare-6"", ""fantaxy/playground25"", ""Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""asdaxcvzxc/enhanceaiteam-Flux-Uncensored-V2"", ""NativeAngels/Compare-6"", ""K00B404/enhanceaiteam-Flux-Uncensored-V2_enhanced"", ""huanhoang/enhanceaiteam-Flux-Uncensored-V2"", ""jcnsr/enhanceaiteam-Flux-Uncensored-V2s"", ""salomonsky/flux3"", ""kiddkifli/enhanceaiteam-Flux-Uncensored-V2"", ""NativeAngels/Serverless-ImgGen-Hub"", ""nextusofficielaioa/Flux-Uncensored-AIOA"", ""theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""salomonsky/flux1"", ""DAPANTELIS/enhanceaiteam-Flux-Uncensored-V2"", ""EduPredit/enhanceaiteam"", ""DanialKing1/enhanceaiteam-Flux-Uncensored-V2"", ""TomCroods/enhanceaiteam-Flux-Uncensored-V2"", ""roshikhan301/enhanceaiteam-Flux-Uncensored-V2"", ""aryan440/Aryan440_uncensored_imagen"", ""Parmist/strangerzonehf-Flux-Super-Realism-LoRA"", ""Nymbo/serverless-imggen-test"", ""wrenth04/FLUX"", ""K00B404/Flux-Uncensored-V2"", ""fabiofalopes/enhanceaiteam-Flux-Uncensored-V2"", 
""stevegu1984/enhanceaiteam-Flux-Uncensored-V2"", ""K00B404/enhanceaiteam-Flux-Uncensored-V2"", ""sudoshellz/enhanceaiteam-Flux-Uncensored-V2"", ""crumper/enhanceaiteam-Flux-Uncensored-V2"", ""Appelflap1994/enhanceaiteam-Flux-Uncensored-V2"", ""khanfer/enhanceaiteam-Flux-Uncensored-V2"", ""robmillersoftware/test"", ""Ryouko65777/Flux-Uncensored-V2"", ""bigfootx/enhanceaiteam-Flux-Uncensored-V2"", ""AppRich/enhanceaiteam-Flux-Uncensored-V2"", ""onluckyyy/enhanceaiteam-Flux-Uncensored-V2"", ""bigdawg314/enhanceaiteam-Flux-Uncensored-V2"", ""TheGiantApe/enhanceaiteam-Flux-Uncensored-V2"", ""puimperie/enhanceaiteam-Flux-Uncensored-V2"", ""Arieff22/enhanceaiteam-Flux-Uncensored-V2"", ""bozzaman/enhanceaiteam-Flux-Uncensored-V2"", ""nevproject/Flux-Uncensored-V2"", ""Mihail-56/enhanceaiteam-Flux-Uncensored-V2"", ""ktzpaul/enhanceaiteam-Flux-Uncensored-V2"", ""JagdishBabo/enhanceaiteam-Flux-Uncensored-V2"", ""jonywick718doe/enhanceaiteam-Flux-Uncensored-V25"", ""elis159/enhanceaiteam-Flux-Uncensored-V2"", ""Draner87/enhanceaiteam-Flux-Uncensored-V2"", ""mosca312/enhanceaiteam-Flux-Uncensored-V2"", ""talktechjon/enhanceaiteam-Flux-Uncensored-V2"", ""denniarems/enhanceaiteam-Flux-Uncensored-V2"", ""suharik18/enhanceaiteam-Flux-Uncensored-V2"", ""ottomanxxx/enhanceaiteam-Flux-Uncensored-V2"", ""srkhaz/enhanceaiteam-Flux-Uncensored-V2"", ""cdeal/enhanceaiteam-Flux-Uncensored-V2"", ""thawn9o9/enhanceaiteam-Flux-Uncensored-V22"", ""anisirji/enhanceaiteam-Flux-Uncensored-V2"", ""KingSatan/enhanceaiteam-Flux-Uncensored-V2"", ""zikazama/enhanceaiteam-Flux-Uncensored-V2"", ""dbeck22/enhanceaiteam-Flux-Uncensored-V2"", ""Mercy96/enhanceaiteam-Flux-Uncensored-V2"", ""skywalker265/enhanceaiteam-Flux-Uncensored-V2"", ""miktt55/enhanceaiteam-Flux-Uncensored-V2"", ""amanbhaiaman/enhanceaiteam-Flux-Uncensored-V2"", ""Shehayrar/enhanceaiteam-Flux-Uncensored-V2"", ""Wooiz/enhanceaiteam-Flux-Uncensored-V2"", ""Shubam-raj/enhanceaiteam-Flux-Uncensored-V2"", 
""ctech52762/enhanceaiteam-Flux-Uncensored-V2"", ""uran69/enhanceaiteam-Flux-Uncensored-V2"", ""Silk7499/enhanceaiteam-Flux-Uncensored-V2"", ""K00B404/enhanceaiteam-Flux-Uncensored-V2super"", ""I-love-potatoes-and-ai/enhanceaiteam-Flux-Uncensored-V2"", ""bigballa8899/enhanceaiteam-Flux-Uncensored-V2"", ""NooooZ/enhanceaiteam-Flux-Uncensored-V2"", ""NooooZ/enhanceaiteam-Flux-Uncensored-V2.2"", ""jabolaso/enhanceaiteam-Flux-Uncensored-V2"", ""Vendettaman/enhanceaiteam-Flux-Uncensored-V2"", ""delixxx/enhanceaiteam-Flux-Uncensored-V2"", ""rohit-pers/enhanceaiteam-Flux-Uncensored-V2"", ""Lolls/enhanceaiteam-Flux-Uncensored-V2"", ""jasonvanguard/enhanceaiteam-Flux-Uncensored-V2"", ""nextusofficielaioa/enhanceaiteam-Flux-Uncensored-V2"", ""captnmorgan420/enhanceaiteam-Flux-Uncensored-V2"", ""gonther/enhanceaiteam-Flux-Uncensored-V2"", ""drewlarrowood/enhanceaiteam-Flux-Uncensored-V2"", ""Brrock/enhanceaiteam-Flux-Uncensored-V2"", ""ayatomiura1/enhanceaiteam-Flux-Uncensored-V2"", ""Alc25/enhanceaiteam-Flux-Uncensored-V2"", ""raul777/enhanceaiteam-Flux-Uncensored-V2"", ""Brrock/enhanceaiteam-Flux-Uncensored-V21"", ""NativeAngels/enhanceaiteam-Flux-Uncensored-V2"", ""l2345623/enhanceaiteam-Flux-Uncensored-V2"", ""theconofwrath/enhanceaiteam-Flux-Uncensored-V2"", ""illestnoize/Compare-6"", ""Evervolv/enhanceaiteam-Flux-Uncensored-V2"", ""Mj1395/enhanceaiteam-Flux-Uncensored-V2"", ""andyknewbetter/enhanceaiteam-Flux-Uncensored-V2"", ""kidl/enhanceaiteam-Flux-Uncensored-V2"", ""davidwog/enhanceaiteam-Flux-Uncensored-V2""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-11 06:05:24+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: creativeml-openrail-m\npipeline_tag: image-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- lora\n- diffusers\n- fluxpipeline\n- flux\n- not-for-all-audiences\nwidget:\n- text: nsfw nude woman on beach, sunset, long flowing hair, sensual pose\n output:\n url: 
https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_1.jpg\n- text: nsfw girls locker room, nude middle school girls soccer team, nubile girls,\n post-game celebration\n output:\n url: https://huggingface.co/enhanceaiteam/Flux-Uncensored-V2/resolve/main/image/example_2.jpg"", ""transformersInfo"": null, ""_id"": ""66f9b0767b28c82e56f24bac"", ""modelId"": ""enhanceaiteam/Flux-Uncensored-V2"", ""usedStorage"": 687476088}",0,,0,,0,,0,,0,,0
Magma-8B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ microsoft/Magma-8B,"---
3
+ library_name: transformers
4
+ pipeline_tag: image-text-to-text
5
+ license: mit
6
+ ---
7
+
8
+ # Model Card for Magma-8B
9
+
10
+ <!-- Provide a quick summary of what the model is/does. -->
11
+
12
+ <div align=""center"">
13
+ <h2>Magma: A Foundation Model for Multimodal AI Agents</h2>
14
+
15
+ [Jianwei Yang](https://jwyang.github.io/)<sup>*</sup><sup>1</sup><sup>†</sup>&nbsp;
16
+ [Reuben Tan](https://cs-people.bu.edu/rxtan/)<sup>1</sup><sup>†</sup>&nbsp;
17
+ [Qianhui Wu](https://qianhuiwu.github.io/)<sup>1</sup><sup>†</sup>&nbsp;
18
+ [Ruijie Zheng](https://ruijiezheng.com/)<sup>2</sup><sup>‡</sup>&nbsp;
19
+ [Baolin Peng](https://scholar.google.com/citations?user=u1CNjgwAAAAJ&hl=en&oi=ao)<sup>1</sup><sup>‡</sup>&nbsp;
20
+ [Yongyuan Liang](https://cheryyunl.github.io)<sup>2</sup><sup>‡</sup>
21
+
22
+ [Yu Gu](http://yu-gu.me/)<sup>1</sup>&nbsp;
23
+ [Mu Cai](https://pages.cs.wisc.edu/~mucai/)<sup>3</sup>&nbsp;
24
+ [Seonghyeon Ye](https://seonghyeonye.github.io/)<sup>4</sup>&nbsp;
25
+ [Joel Jang](https://joeljang.github.io/)<sup>5</sup>&nbsp;
26
+ [Yuquan Deng](https://scholar.google.com/citations?user=LTC0Q6YAAAAJ&hl=en)<sup>5</sup>&nbsp;
27
+ [Lars Liden](https://sites.google.com/site/larsliden)<sup>1</sup>&nbsp;
28
+ [Jianfeng Gao](https://www.microsoft.com/en-us/research/people/jfgao/)<sup>1</sup><sup>▽</sup>
29
+
30
+ <sup>1</sup> Microsoft Research; <sup>2</sup> University of Maryland; <sup>3</sup> University of Wisconsin-Madison
31
+ <sup>4</sup> KAIST; <sup>5</sup> University of Washington
32
+
33
+ <sup>*</sup> Project lead <sup>†</sup> First authors <sup>‡</sup> Second authors <sup>▽</sup> Leadership
34
+
35
+ \[[arXiv Paper](https://www.arxiv.org/pdf/2502.13130)\] &nbsp; \[[Project Page](https://microsoft.github.io/Magma/)\] &nbsp; \[[Hugging Face Paper](https://huggingface.co/papers/2502.13130)\] &nbsp; \[[Github Repo](https://github.com/microsoft/Magma)\] &nbsp; \[[Video](https://www.youtube.com/watch?v=SbfzvUU5yM8)\]
36
+
37
+ </div>
38
+
39
+ ## Agents
40
+
41
+ ### UI Navigation
42
+ <div align=""center"">
43
+ <div align=""center"" style=""display: inline-block; width: 48%;"">
44
+ <video autoplay muted loop controls playsinline style=""margin-bottom: 2px;"">
45
+ <source src=""https://microsoft.github.io/Magma/static/videos/ui_weather_and_flight_mode.mp4"" type=""video/mp4"">
46
+ </video>
47
+ <p class=""is-5 has-text-centered"" style=""font-size: 14px;"">What's weather in Seattle? & turn on flight mode</p>
48
+ </div>
49
+ <div align=""center"" style=""display: inline-block; width: 48%;"">
50
+ <video autoplay muted loop controls playsinline style=""margin-bottom: 2px;"">
51
+ <source src=""https://microsoft.github.io/Magma/static/videos/ui_wordle.mp4"" type=""video/mp4"">
52
+ </video>
53
+ <p class=""is-5 has-text-centered"" style=""font-size: 14px;"">Share and message this to Bob Steve. Click send button</p>
54
+ </div>
55
+ </div>
56
+
57
+ ### Robot Manipulation
58
+ <div align=""center"">
59
+ <div align=""center"">
60
+ <div style=""display: flex; justify-content: space-between; gap: 1%;"">
61
+ <div style=""width: 32%;"">
62
+ <video autoplay muted loop controls playsinline height=""98%"" style=""max-width: 450px; width: 100%; border-radius: 10px; overflow: hidden; margin-bottom: 5px;"">
63
+ <source src=""https://microsoft.github.io/Magma/static/videos/magma_hotdog.mp4"" type=""video/mp4"">
64
+ </video>
65
+ </div>
66
+ <div style=""width: 32%;"">
67
+ <video autoplay muted loop controls playsinline height=""98%"" style=""max-width: 450px; width: 100%; border-radius: 10px; overflow: hidden; margin-bottom: 5px;"">
68
+ <source src=""https://microsoft.github.io/Magma/static/videos/magma_mushroom.mp4"" type=""video/mp4"">
69
+ </video>
70
+ </div>
71
+ <div style=""width: 32%;"">
72
+ <video autoplay muted loop controls playsinline height=""98%"" style=""max-width: 450px; width: 100%; border-radius: 10px; overflow: hidden; margin-bottom: 5px;"">
73
+ <source src=""https://microsoft.github.io/Magma/static/videos/magma_left.mp4"" type=""video/mp4"">
74
+ </video>
75
+ </div>
76
+ </div>
77
+ </div>
78
+ <div align=""center"">
79
+ <div style=""display: flex; justify-content: space-between; gap: 1%;"">
80
+ <div style=""width: 32%;"">
81
+ <p style=""text-align: center;font-size: 14px;margin-top: 0;"">Pick Place Hotdog Sausage</p>
82
+ </div>
83
+ <div style=""width: 32%;"">
84
+ <p style=""text-align: center;font-size: 14px;margin-top: 0;"">Put Mushroom Place Pot</p>
85
+ </div>
86
+ <div style=""width: 32%;"">
87
+ <p style=""text-align: center;font-size: 14px;margin-top: 0;"">Push Cloth Left to Right (Out-of-Dist.)</p>
88
+ </div>
89
+ </div>
90
+ </div>
91
+ </div>
92
+
93
+ ### Gaming
94
+
95
+ Task: Model controls the robot to collect green blocks.
96
+
97
+ <div align=""center"">
98
+ <div align=""center"" style=""display: inline-block; width: 48%;"">
99
+ <video autoplay muted loop controls playsinline style=""margin-bottom: 2px;"">
100
+ <source src=""https://microsoft.github.io/Magma/static/videos/magma_vs_llava.mp4"" type=""video/mp4"">
101
+ </video>
102
+ <p class=""is-5 has-text-centered"" style=""font-size: 14px;"">Magma v.s. LLaVA-OneVision</p>
103
+ </div>
104
+ <div align=""center"" style=""display: inline-block; width: 48%;"">
105
+ <video autoplay muted loop controls playsinline style=""margin-bottom: 2px;"">
106
+ <source src=""https://microsoft.github.io/Magma/static/videos/magma_vs_gpt4omini.mp4"" type=""video/mp4"">
107
+ </video>
108
+ <p class=""is-5 has-text-centered"" style=""font-size: 14px;"">Magma v.s. GPT4o-minni</p>
109
+ </div>
110
+ </div>
111
+
112
+ ## Model Details
113
+
114
+ <div align=""center"">
115
+ <img src=""https://github.com/microsoft/Magma/blob/main/assets/images/magma_teaser.png?raw=true"" width=""100%"">
116
+ </div>
117
+
118
+ ### Model Description
119
+
120
+ <!-- Provide a longer summary of what this model is. -->
121
+
122
+ Magma is a multimodal agentic AI model that can generate text based on the input text and image. The model is designed for research purposes and aimed at knowledge-sharing and accelerating research in multimodal AI, in particular the multimodal agentic AI. The main innovation of this model lies on the introduction of two technical innovations: **Set-of-Mark** and **Trace-of-Mark**, and the leverage of a **large amount of unlabeled video data** to learn the spatial-temporal grounding and planning. Please refer to our paper for more technical details.
123
+
124
+ ### Highlights
125
+ * **Digital and Physical Worlds:** Magma is the first-ever foundation model for multimodal AI agents, designed to handle complex interactions across both virtual and real environments!
126
+ * **Versatile Capabilities:** Magma as a single model not only possesses generic image and videos understanding ability, but also generate goal-driven visual plans and actions, making it versatile for different agentic tasks!
127
+ * **State-of-the-art Performance:** Magma achieves state-of-the-art performance on various multimodal tasks, including UI navigation, robotics manipulation, as well as generic image and video understanding, in particular the spatial understanding and reasoning!
128
+ * **Scalable Pretraining Strategy:** Magma is designed to be **learned scalably from unlabeled videos** in the wild in addition to the existing agentic data, making it strong generalization ability and suitable for real-world applications!
129
+
130
+
131
+ ## License
132
+
133
+ The model is developed by Microsoft and is funded by Microsoft Research. The model is shared by Microsoft Research and is licensed under the MIT License.
134
+
135
+ <!-- {{ model_description | default("""", true) }}
136
+
137
+ - **Developed by:** {{ developers | default(""[More Information Needed]"", true)}}
138
+ - **Funded by [optional]:** {{ funded_by | default(""[More Information Needed]"", true)}}
139
+ - **Shared by [optional]:** {{ shared_by | default(""[More Information Needed]"", true)}}
140
+ - **Model type:** {{ model_type | default(""[More Information Needed]"", true)}}
141
+ - **Language(s) (NLP):** {{ language | default(""[More Information Needed]"", true)}}
142
+ - **License:** {{ license | default(""[More Information Needed]"", true)}}
143
+ - **Finetuned from model [optional]:** {{ base_model | default(""[More Information Needed]"", true)}} -->
144
+
145
+ ## How to Get Started with the Model
146
+
147
+ <!-- {{ get_started_code | default(""[More Information Needed]"", true)}} -->
148
+
149
+ To get started with the model, you first need to make sure that `transformers` and `torch` are installed, as well as installing the following dependencies:
150
+
151
+ ```bash
152
+ pip install torchvision Pillow open_clip_torch
153
+ ```
154
+
155
+ ⚠️ Please note that you need to install our customized transformers lib:
156
+ ```bash
157
+ pip install git+https://github.com/jwyang/transformers.git@dev/jwyang-v4.48.2
158
+ ```
159
+ See [here](https://github.com/microsoft/Magma?tab=readme-ov-file#installation) for the reason why you need this.
160
+
161
+ Then you can run the following code:
162
+
163
+ ```python
164
+ import torch
165
+ from PIL import Image
166
+ from io import BytesIO
167
+ import requests
168
+
169
+ from transformers import AutoModelForCausalLM, AutoProcessor
170
+
171
+ # Load the model and processor
172
+ dtype = torch.bfloat16
173
+ model = AutoModelForCausalLM.from_pretrained(""microsoft/Magma-8B"", trust_remote_code=True, torch_dtype=dtype)
174
+ processor = AutoProcessor.from_pretrained(""microsoft/Magma-8B"", trust_remote_code=True)
175
+ model.to(""cuda"")
176
+
177
+ # Inference
178
+ url = ""https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png""
179
+ image = Image.open(BytesIO(requests.get(url, stream=True).content))
180
+ image = image.convert(""RGB"")
181
+
182
+ convs = [
183
+ {""role"": ""system"", ""content"": ""You are agent that can see, talk and act.""},
184
+ {""role"": ""user"", ""content"": ""<image_start><image><image_end>\nWhat is in this image?""},
185
+ ]
186
+ prompt = processor.tokenizer.apply_chat_template(convs, tokenize=False, add_generation_prompt=True)
187
+ inputs = processor(images=[image], texts=prompt, return_tensors=""pt"")
188
+ inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
189
+ inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
190
+ inputs = inputs.to(""cuda"").to(dtype)
191
+
192
+ generation_args = {
193
+ ""max_new_tokens"": 128,
194
+ ""temperature"": 0.0,
195
+ ""do_sample"": False,
196
+ ""use_cache"": True,
197
+ ""num_beams"": 1,
198
+ }
199
+
200
+ with torch.inference_mode():
201
+ generate_ids = model.generate(**inputs, **generation_args)
202
+
203
+ generate_ids = generate_ids[:, inputs[""input_ids""].shape[-1] :]
204
+ response = processor.decode(generate_ids[0], skip_special_tokens=True).strip()
205
+ print(response)
206
+ ```
207
+
208
+ ## Training Details
209
+
210
+ ### Training Data
211
+
212
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
213
+
214
+ <!-- {{ training_data | default(""[More Information Needed]"", true)}} -->
215
+
216
+ Our training data consists of:
217
+
218
+ * Generic Image SFT Data: [LLaVA-Next](https://llava-vl.github.io/blog/2024-01-30-llava-next/), [InfographicVQA](https://www.docvqa.org/datasets/infographicvqa), [ChartQA_Augmented](https://github.com/vis-nlp/ChartQA), [FigureQA](https://www.microsoft.com/en-us/research/project/figureqa-dataset/), [TQA](https://paperswithcode.com/dataset/tqa), [ScienceQA](https://scienceqa.github.io/).
219
+
220
+ * Generic Video SFT Data: [ShareGPT4Video](https://sharegpt4video.github.io/) and [LLaVA-Video](https://huggingface.co/datasets/lmms-lab/LLaVA-Video-178K).
221
+
222
+ * Instructional Video Data: [Ego4d](https://ego4d-data.org/), [Somethingv2](https://www.qualcomm.com/developer/software/something-something-v-2-dataset), [Epic-Kitchen](https://epic-kitchens.github.io/2025) and other related instructional videos.
223
+
224
+ * Robotics Manipulation Data: [Open-X-Embodiment](https://robotics-transformer-x.github.io/).
225
+
226
+ * UI Grounding Data: [SeeClick](https://github.com/njucckevin/SeeClick).
227
+
228
+ * UI Navigation Data: [Mind2web](https://osu-nlp-group.github.io/Mind2Web/) and [AITW](https://github.com/google-research/google-research/tree/master/android_in_the_wild).
229
+
230
+ The data collection process involved sourcing information from publicly available documents, with a meticulous approach to filtering out undesirable documents and images. To safeguard privacy, we carefully filtered various image and text data sources to remove or scrub any potentially personal data from the training data.
231
+
232
+ More details can be found in our paper.
233
+
234
+ [Microsoft Privacy Notice](https://go.microsoft.com/fwlink/?LinkId=521839)
235
+
236
+ ### Training Procedure
237
+
238
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
239
+
240
+ #### Preprocessing
241
+
242
+ <!-- {{ preprocessing | default(""[More Information Needed]"", true)}} -->
243
+ In addition to the text-related preprocessing, we mainly undertake the following image and video preprocessing steps:
244
+
245
+ * UI Grounding and Navigation Data: For each UI screenshot, we extract the bounding boxes for the UI elements, and apply [Set-of-Mark Prompting](https://arxiv.org/abs/2310.11441) to overlay numeric marks on the raw image. The model is trained to generate the UI grounding text based on the image and the Set-of-Mark prompts.
246
+
247
+ * Instruction Video Data: For each video clip, we apply [Co-Tracker](https://co-tracker.github.io/) to extract the grid traces and then apply filtering algorithm to remove the noisy or static points. For videos that bear camera motion, we further apply homography transformation to stabilize the video clips. In the end, we assign a numeric mark for each trace which gives us a set of trace-of-mark. The model is trained to generate the trace-of-mark given the video clips and instructional text.
248
+
249
+ * Robotics Manipulation Data: For robotics data in Open-X Embodiment, we extract the 7 DoF robot gripper state and also extract the trace-of-mark from the video clips. Similar filtering and stabilization steps are applied to the video clips. The model is trained to generate the robot manipulation action as well as the trace-of-mark given the video clips and instructional text.
250
+
251
+ After all these preprocessing, we combine them with existing text annotations to form our final multimodal training data. We refer to our paper for more technical details.
252
+
253
+ #### Training Hyperparameters
254
+
255
+ <!-- - **Training regime:** {{ training_regime | default(""[More Information Needed]"", true)}} fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
256
+
257
+ We used bf16 mixed precision for training on H100s and MI300s. We used the following hyperparameters for training:
258
+
259
+ * Batch size: 1024
260
+ * Learning rate: 1e-5
261
+ * Max sequence length: 4096
262
+ * Resolution: maximally 1024x1024 for image, 512x512 for video frame.
263
+ * Pretraining Epochs: 3
264
+
265
+
266
+ ## Evaluation
267
+
268
+ <!-- This section describes the evaluation protocols and provides the results. -->
269
+ We evaluate the model in zero-shot manner on a wide range of tasks, mostly agent-related tasks.
270
+
271
+ ### Testing Data, Factors & Metrics
272
+ <!-- This should link to a Dataset Card if possible. -->
273
+
274
+ <!-- {{ testing_data | default(""[More Information Needed]"", true)}} -->
275
+
276
+ <!-- #### Factors
277
+
278
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
279
+
280
+ <!-- {{ testing_factors | default(""[More Information Needed]"", true)}} -->
281
+
282
+ #### Zero-shot Testing Data
283
+
284
+ We evaluate the model's zero-shot performance on the following datasets:
285
+
286
+ * UI Grounding: [ScreenSpot](https://huggingface.co/datasets/rootsautomation/ScreenSpot) and [VisualWebArena](https://jykoh.com/vwa).
287
+
288
+ * Robotics Manipulation: [SimplerEnv](https://github.com/simpler-env/SimplerEnv) and WidowX real robot.
289
+
290
+ * Spatial Understanding and Reasoning: [VSR](https://github.com/cambridgeltl/visual-spatial-reasoning), [BLINK](https://zeyofu.github.io/blink/) and [SpatialEval](https://spatialeval.github.io/).
291
+
292
+
293
+
294
+ #### Finetuned Testing Data
295
+
296
+ We evaluate the model's performance after finetuning on the following datasets:
297
+
298
+ * UI Navigation: [Mind2Web](https://osu-nlp-group.github.io/Mind2Web/) and [AITW](https://github.com/google-research/google-research/tree/master/android_in_the_wild).
299
+
300
+ * Robotics Manipulation: [SimplerEnv](https://github.com/simpler-env/SimplerEnv) and WidowX real robot.
301
+
302
+ * Multimodal Image Understanding and Reasoning: [VQAv2](https://visualqa.org/), [GQA](https://cs.stanford.edu/people/dorarad/gqa/about.html), [MME](https://github.com/BradyFU/Awesome-Multimodal-Large-Language-Models/tree/Evaluation), [POPE](https://huggingface.co/datasets/lmms-lab/POPE), [TextVQA](https://textvqa.org/), [ChartQA](https://github.com/vis-nlp/ChartQA), [DocVQA](https://www.docvqa.org/).
303
+
304
+ * Multimodal Video Understanding and Reasoning: [Next-QA](https://github.com/doc-doc/NExT-QA), [VideoMME](https://video-mme.github.io/home_page.html), [MVBench](https://huggingface.co/datasets/OpenGVLab/MVBench).
305
+
306
+ #### Metrics
307
+ <!-- {{ testing_metrics | default(""[More Information Needed]"", true)}} -->
308
+
309
+ We follow the individual dataset's evaluation metrics for the evaluation. Please refer to the original dataset for more details.
310
+
311
+
312
+ ### Results on Agentic Intelligence
313
+
314
+ Zero-shot evaluation on agentic intelligence. We report the results for pretrained Magma without any domain-specific finetuning. Magma is the only model that can conduct the full task spectrum.
315
+
316
+ | Model | VQAv2 | TextVQA | POPE | SS-Mobile | SS-Desktop | SS-Web | VWB-Ele-G | VWB-Act-G | SE-Google Robot | SE-Bridge |
317
+ |-----------------------|------|--------|------|----------|-----------|------|----------|----------|---------------|-----------|
318
+ | GPT-4V | 77.2 | 78.0 | n/a | 23.6 | 16.0 | 9.0 | 67.5 | 75.7 | - | - |
319
+ | GPT-4V-OmniParser | n/a | n/a | n/a | 71.1 | 45.6 | 58.5 | - | - | - | - |
320
+ | LLava-1.5 | 78.5 | 58.2 | 85.9 | - | - | - | 12.1 | 13.6 | - | - |
321
+ | LLava-Next | 81.3 | 64.9 | 86.5 | - | - | - | 15.0 | 8.7 | - | - |
322
+ | Qwen-VL | 78.8 | 63.8 | n/a | 6.2 | 6.3 | 3.0 | 14.0 | 0.7 | - | - |
323
+ | Qwen-VL-Chat | 78.2 | 61.5 | n/a | - | - | - | - | - | - | - |
324
+ | Fuyu | 74.2 | n/a | n/a | 21.2 | 20.8 | 19.2 | 19.4 | 15.5 | - | - |
325
+ | SeeClick | - | - | - | 65.0 | 51.1 | 44.1 | 9.9 | 1.9 | - | - |
326
+ | Octo | - | - | - | - | - | - | - | - | - | - |
327
+ | RT-1-X | - | - | - | - | - | - | - | - | 6.0 | 15.9 |
328
+ | OpenVLA | - | - | - | - | - | - | - | - | 34.2 | 1.1 |
329
+ | Magma-8B | 80.0 | 66.5 | 87.4 | 59.5 | 64.1 | 60.6 | 96.3 | 71.8 | 52.3 | 35.4 |
330
+
331
+ *Notes: SS - ScreenSpot, VWB - VisualWebArena, SE - SimplerEnv*
332
+ <!-- {{ results | default(""[More Information Needed]"", true)}} -->
333
+
334
+ <!-- {{ results_summary | default("""", true) }} -->
335
+
336
+
337
+ ## Technical Specifications
338
+
339
+
340
+ ### Model Architecture and Objective
341
+
342
+ <!-- {{ model_specs | default(""[More Information Needed]"", true)}} -->
343
+
344
+ * Language Model: We use [Meta LLama-3](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) as the backbone LLM.
345
+ * Vision Encoder: We use [CLIP-ConvneXt-XXLarge](https://huggingface.co/laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg) trained by LAION team as the vision encoder to tokenize the images and videos.
346
+
347
+ The whole pipeline follows the common practice in the multimodal LLMs, where the vision encoder is used to tokenize the images and videos, and then the visual tokens are fed into the LLM along with the textual tokens to generate the text outputs.
348
+
349
+
350
+ ### Compute Infrastructure
351
+ <!-- {{ compute_infrastructure | default(""[More Information Needed]"", true)}} -->
352
+
353
+ We used [Azure ML](https://azure.microsoft.com/en-us/products/machine-learning) for our model training.
354
+
355
+
356
+ #### Hardware
357
+ <!-- {{ hardware_requirements | default(""[More Information Needed]"", true)}} -->
358
+
359
+ Our model is trained on two GPUs:
360
+
361
+ * Nvidia H100
362
+ * AMD MI300
363
+
364
+
365
+
366
+ #### Software
367
+ <!-- {{ software | default(""[More Information Needed]"", true)}} -->
368
+
369
+ Our model is built based on:
370
+
371
+ * [Pytorch](https://pytorch.org/)
372
+ * [Transformers](https://huggingface.co/transformers/)
373
+ * [TorchVision](https://pytorch.org/vision/stable/index.html)
374
+ * [DeepSpeed](https://www.deepspeed.ai/)
375
+ * [FlashAttention](https://github.com/HazyResearch/flash-attention)
376
+
377
+
378
+ ## Intended Uses
379
+
380
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
381
+
382
+ This model is intended for broad research use in English. It is designed only for research purposes and aimed at knowledge-sharing and accelerating research in multimodal AI, particularly in multimodal agentic AI. It is intended to be used by domain experts who are independently capable of evaluating the quality of outputs before acting on them.
383
+
384
+ ### Direct Use
385
+
386
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
387
+
388
+ The model takes images and text as inputs, and produces the textual outputs for the following uses:
389
+
390
+ * **Image/Video-Conditioned Text Generation:** The model can generate text (e.g., descriptions, answers) based on the input text and image.
391
+
392
+ * **Visual Planning Capabilities:** The model can also produce the visual trace as the future planning to accomplish a task (e.g., move object from one place to another).
393
+
394
+ * **Agentic Capabilities:** The model can also generate UI grounding (e.g., click the ""search"" button) and robotics manipulations (e.g., 7 DoF for the robot gripper).
395
+
396
+
397
+
398
+ ### Downstream Use
399
+
400
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
401
+
402
+ <!-- {{ downstream_use | default(""[More Information Needed]"", true)}} -->
403
+
404
+ <!-- ### Out-of-Scope Use -->
405
+
406
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
407
+
408
+ <!-- {{ out_of_scope_use | default(""[More Information Needed]"", true)}} -->
409
+
410
+ The model can be further finetuned for different downstream tasks, such as:
411
+
412
+ * **Image Captioning and QA:** We can further finetune this model for image captioning and QA tasks under the pipeline of multimodal LLMs. Based on our experiments, the model can achieve competitive performance yet better spatial understanding and reasoning on these tasks.
413
+
414
+ * **Video Captioning and QA:** We can further finetune this model for video captioning and QA tasks under the pipeline of multimodal LLMs. Based on our experiments, the model can achieve competitive performance yet better temporal understanding and reasoning on these tasks.
415
+
416
+ * **UI Navigation:** We can finetune this model for specific UI navigation tasks, such as web navigation or mobile navigation. The model can achieve superior performance on these tasks.
417
+
418
+ * **Robotics Manipulation:** Our model can be further finetuned for robotics tasks given its general agentic capabilities as a vision-language-action model. After finetuning, our model significantly outperforms the state-of-the-art models such as OpenVLA on robotics manipulation tasks.
419
+
420
+
421
+ ## Bias, Risks, and Limitations
422
+
423
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
424
+
425
+ <!-- {{ bias_risks_limitations | default(""[More Information Needed]"", true)}} -->
426
+
427
+ Please note that this model is not specifically designed or evaluated for all downstream purposes.
428
+
429
+ The model is not intended to be deployed in production settings. It should not be used in high-risk scenarios, such as military and defense, financial services, and critical infrastructure systems.
430
+
431
+ Developers should consider common limitations of multimodal models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case.
432
+
433
+ Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Like other multimodal models, Magma can potentially behave in ways that are unfair, unreliable, or offensive.
434
+
435
+ The models' outputs do not reflect the opinions of Microsoft.
436
+
437
+ Some of the limiting behaviors to be aware of include:
438
+
439
+ * **Quality of Service:** The model is trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. Magma is not intended to support multilingual use.
440
+
441
+ * **Representation of Harms & Perpetuation of Stereotypes:** These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
442
+
443
+ * **Inappropriate or Offensive Content:** These models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.
444
+
445
+ * **Information Reliability:** Multimodal models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
446
+
447
+ Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Using safety services like [Azure AI Content Safety](https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety) that have advanced guardrails is highly recommended.
448
+
449
+
450
+ ### Recommendations
451
+
452
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
453
+
454
+ <!-- {{ bias_recommendations | default(""Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations."", true)}} -->
455
+
456
+ Magma was developed for research purposes only. Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model.
457
+
458
+ The recommended usage for the finetuned models is within the research settings they were trained on — namely,
459
+ - an android simulator running on a computer for UI manipulation.
460
+ - an enclosure equipped with a robotic arm and everyday objects for robotic manipulation.
461
+
462
+ For UI navigation task, researchers should make sure a human is in the loop and in control for every action the agentic system generates. Since the model cannot act by itself, the sub-module a researcher uses to actually perform the UI navigation action should ensure no unintended consequences can occur as a result of performing the UI action proposed by the model.
463
+
464
+ For the robotic manipulation task, some mitigation strategies to use for human safety when operating robotic arms include:
465
+
466
+ * **Safety Zones and Barriers:** Establish physical barriers or safety zones around robotic workspaces to prevent unauthorized access.
467
+ * **Emergency Stop Systems:** Equip robotic arms with easily accessible emergency stop buttons. Implement a fail-safe mechanism that triggers an immediate stop of operations in case of an emergency
468
+ * **Safety Standards and Compliance:** Adhere to established safety standards (e.g., ISO 10218, ISO/TS 15066) for industrial robots and collaborative robots.
469
+ * **User Training and Awareness:** Provide comprehensive training for all personnel working around robotic arms to understand their functions, safety features, and emergency procedures. Promote awareness of the potential risks associated with robotic manipulation.
470
+
471
+
472
+ ## Citation
473
+
474
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
475
+
476
+ ```bibtex
477
+ @misc{yang2025magmafoundationmodelmultimodal,
478
+ title={Magma: A Foundation Model for Multimodal AI Agents},
479
+ author={Jianwei Yang and Reuben Tan and Qianhui Wu and Ruijie Zheng and Baolin Peng and Yongyuan Liang and Yu Gu and Mu Cai and Seonghyeon Ye and Joel Jang and Yuquan Deng and Lars Liden and Jianfeng Gao},
480
+ year={2025},
481
+ eprint={2502.13130},
482
+ archivePrefix={arXiv},
483
+ primaryClass={cs.CV},
484
+ url={https://arxiv.org/abs/2502.13130},
485
+ }
486
+ ```
487
+ <!-- {{ citation_bibtex | default(""[More Information Needed]"", true)}} -->","{""id"": ""microsoft/Magma-8B"", ""author"": ""microsoft"", ""sha"": ""6a6bb9bc6b7a16bc802e19f5150a53a5b91fa64c"", ""last_modified"": ""2025-04-21 06:41:51+00:00"", ""created_at"": ""2025-02-23 03:10:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4731, ""downloads_all_time"": null, ""likes"": 361, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""magma"", ""text-generation"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""arxiv:2502.13130"", ""arxiv:2310.11441"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MagmaForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Magma-8B--configuration_magma.MagmaConfig"", ""AutoModelForCausalLM"": ""microsoft/Magma-8B--modeling_magma.MagmaForCausalLM""}, ""model_type"": ""magma"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|eot_id|>"", ""pad_token"": 
""<pad>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Magma-8B--modeling_magma.MagmaForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_magma.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_magma.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_tower_magma.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_magma.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_magma.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""microsoft/Magma-UI"", ""microsoft/Magma-Gaming"", ""drlon/magma-ui-agent""], ""safetensors"": {""parameters"": {""BF16"": 8906218368}, ""total"": 8906218368}, 
""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-21 06:41:51+00:00"", ""cardData"": ""library_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Magma-8B--modeling_magma.MagmaForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""67ba91ad7446c0c46041de5e"", ""modelId"": ""microsoft/Magma-8B"", ""usedStorage"": 22796450224}",0,,0,,0,https://huggingface.co/PritamcodesAGI/Magma-8B-4bit,1,,0,"drlon/magma-ui-agent, huggingface/InferenceSupport/discussions/new?title=microsoft/Magma-8B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmicrosoft%2FMagma-8B%5D(%2Fmicrosoft%2FMagma-8B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, microsoft/Magma-Gaming, microsoft/Magma-UI",4
Marco-o1_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ AIDC-AI/Marco-o1,"---
3
+ license: apache-2.0
4
+ library_name: transformers
5
+ inference: false
6
+ ---
7
+ <p align=""center"">
8
+ <img src=""assets/logo.png"" width=""150"" style=""margin-bottom: 0.2;""/>
9
+
10
+ <p>
11
+
12
+ # 🍓 Marco-o1: Towards Open Reasoning Models for Open-Ended Solutions
13
+
14
+ <!-- Broader Real-World Applications -->
15
+
16
+ <!-- # 🍓 Marco-o1: An Open Large Reasoning Model for Real-World Solutions -->
17
+
18
+ <!-- <h2 align=""center""> <a href=""https://github.com/AIDC-AI/Marco-o1/"">Marco-o1</a></h2> -->
19
+ <!-- <h5 align=""center""> If you appreciate our project, please consider giving us a star ⭐ on GitHub to stay updated with the latest developments. </h2> -->
20
+
21
+
22
+ <div align=""center"">
23
+
24
+ <!-- **Affiliations:** -->
25
+
26
+ ⭐ _**MarcoPolo Team**_ ⭐
27
+
28
+ [_**AI Business, Alibaba International Digital Commerce**_](https://aidc-ai.com)
29
+
30
+ [**Github**](https://github.com/AIDC-AI/Marco-o1) 🤗 [**Hugging Face**](https://huggingface.co/AIDC-AI/Marco-o1) 📝 [**Paper**](https://arxiv.org/abs/2411.14405) 🧑‍💻 [**Model**](https://huggingface.co/AIDC-AI/Marco-o1) 🗂️ [**Data**](https://github.com/AIDC-AI/Marco-o1/tree/main/data) 📽️ [**Demo**](https://huggingface.co/AIDC-AI/Marco-o1)
31
+
32
+ </div>
33
+
34
+ 🎯 **Marco-o1** not only focuses on disciplines with standard answers, such as mathematics, physics, and coding—which are well-suited for reinforcement learning (RL)—but also places greater emphasis on **open-ended resolutions**. We aim to address the question: _""Can the o1 model effectively generalize to broader domains where clear standards are absent and rewards are challenging to quantify?""_
35
+
36
+ Currently, Marco-o1 Large Language Model (LLM) is powered by _Chain-of-Thought (CoT) fine-tuning_, _Monte Carlo Tree Search (MCTS)_, _reflection mechanisms_, and _innovative reasoning strategies_—optimized for complex real-world problem-solving tasks.
37
+
38
+ ⚠️ **Limitations:** <ins>We would like to emphasize that this research work is inspired by OpenAI's o1 (from which the name is also derived). This work aims to explore potential approaches to shed light on the currently unclear technical roadmap for large reasoning models. Besides, our focus is on open-ended questions, and we have observed interesting phenomena in multilingual applications. However, we must acknowledge that the current model primarily exhibits o1-like reasoning characteristics and its performance still falls short of a fully realized ""o1"" model. This is not a one-time effort, and we remain committed to continuous optimization and ongoing improvement.</ins>
39
+
40
+ ![img.png](assets/img.png)
41
+
42
+ ## 🚀 Highlights
43
+ Currently, our work is distinguished by the following highlights:
44
+
45
+ - 🍀 Fine-Tuning with CoT Data: We develop Marco-o1-CoT by performing full-parameter fine-tuning on the base model using open-source CoT dataset combined with our self-developed synthetic data.
46
+ - 🍀 Solution Space Expansion via MCTS: We integrate LLMs with MCTS (Marco-o1-MCTS), using the model's output confidence to guide the search and expand the solution space.
47
+ - 🍀 Reasoning Action Strategy: We implement novel reasoning action strategies and a reflection mechanism (Marco-o1-MCTS Mini-Step), including exploring different action granularities within the MCTS framework and prompting the model to self-reflect, thereby significantly enhancing the model's ability to solve complex problems.
48
+ - 🍀 Application in Translation Tasks: We are the first to apply Large Reasoning Models (LRM) to Machine Translation task, exploring inference time scaling laws in the multilingual and translation domain.
49
+
50
+ OpenAI recently introduced the groundbreaking o1 model, renowned for its exceptional reasoning capabilities. This model has demonstrated outstanding performance on platforms such as AIME, CodeForces, surpassing other leading models. Inspired by this success, we aimed to push the boundaries of LLMs even further, enhancing their reasoning abilities to tackle complex, real-world challenges.
51
+
52
+ 🌍 Marco-o1 leverages advanced techniques like CoT fine-tuning, MCTS, and Reasoning Action Strategies to enhance its reasoning power. As shown in Figure 2, by fine-tuning Qwen2-7B-Instruct with a combination of the filtered Open-O1 CoT dataset, Marco-o1 CoT dataset, and Marco-o1 Instruction dataset, Marco-o1 improved its handling of complex tasks. MCTS allows exploration of multiple reasoning paths using confidence scores derived from softmax-applied log probabilities of the top-k alternative tokens, guiding the model to optimal solutions. Moreover, our reasoning action strategy involves varying the granularity of actions within steps and mini-steps to optimize search efficiency and accuracy.
53
+
54
+ <div align=""center"">
55
+ <img src=""assets/intro_2.jpg"" alt=""Figure Description or Alt Text"" width=""80%"">
56
+ <p><strong>Figure 2: </strong>The overview of Marco-o1.</p>
57
+ </div>
58
+
59
+ 🌏 As shown in Figure 3, Marco-o1 achieved accuracy improvements of +6.17% on the MGSM (English) dataset and +5.60% on the MGSM (Chinese) dataset, showcasing enhanced reasoning capabilities.
60
+
61
+ <div align=""center"">
62
+ <img src=""assets/results.jpg"" alt=""Figure Description or Alt Text"" width=""80%"">
63
+ <p><strong>Figure 3: </strong>The main results of Marco-o1.</p>
64
+ </div>
65
+
66
+ 🌎 Additionally, in translation tasks, we demonstrate that Marco-o1 excels in translating slang expressions, such as translating ""这个鞋拥有踩屎感"" (literal translation: ""This shoe offers a stepping-on-poop sensation."") to ""This shoe has a comfortable sole,"" demonstrating its superior grasp of colloquial nuances.
67
+
68
+ <div align=""center"">
69
+ <img src=""assets/translation.jpg"" alt=""Figure Description or Alt Text"" width=""80%"">
70
+         <p><strong>Figure 4: </strong>The demonstration of translation task using Marco-o1.</p>
71
+ </div>
72
+
73
+ For more information, please visit our [**Github**](https://github.com/AIDC-AI/Marco-o1).
74
+
75
+ ## Usage
76
+
77
+ 1. **Load Marco-o1-CoT model:**
78
+ ```
79
+ # Load model directly
80
+ from transformers import AutoTokenizer, AutoModelForCausalLM
81
+
82
+ tokenizer = AutoTokenizer.from_pretrained(""AIDC-AI/Marco-o1"")
83
+ model = AutoModelForCausalLM.from_pretrained(""AIDC-AI/Marco-o1"")
84
+ ```
85
+
86
+ 2. **Inference:**
87
+
88
+ Execute the inference script (you can give any customized inputs inside):
89
+ ```
90
+ ./src/talk_with_model.py
91
+
92
+ # Use vLLM
93
+ ./src/talk_with_model_vllm.py
94
+
95
+ ```
96
+
97
+
98
+ # 👨🏻‍💻 Acknowledgement
99
+
100
+ ## Main Contributors
101
+ From MarcoPolo Team, AI Business, Alibaba International Digital Commerce:
102
+ - Yu Zhao
103
+ - [Huifeng Yin](https://github.com/HuifengYin)
104
+ - Hao Wang
105
+ - [Longyue Wang](http://www.longyuewang.com)
106
+
107
+ ## Citation
108
+
109
+ If you find Marco-o1 useful for your research and applications, please cite:
110
+
111
+ ```
112
+ @misc{zhao2024marcoo1openreasoningmodels,
113
+ title={Marco-o1: Towards Open Reasoning Models for Open-Ended Solutions},
114
+ author={Yu Zhao and Huifeng Yin and Bo Zeng and Hao Wang and Tianqi Shi and Chenyang Lyu and Longyue Wang and Weihua Luo and Kaifu Zhang},
115
+ year={2024},
116
+ eprint={2411.14405},
117
+ archivePrefix={arXiv},
118
+ primaryClass={cs.CL},
119
+ url={https://arxiv.org/abs/2411.14405},
120
+ }
121
+ ```
122
+
123
+ ## LICENSE
124
+
125
+ This project is licensed under [Apache License Version 2](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) (SPDX-License-identifier: Apache-2.0).
126
+
127
+ ## DISCLAIMER
128
+
129
+ We used compliance checking algorithms during the training process, to ensure the compliance of the trained model and dataset to the best of our ability. Due to complex data and the diversity of language model usage scenarios, we cannot guarantee that the model is completely free of copyright issues or improper content. If you believe anything infringes on your rights or generates improper content, please contact us, and we will promptly address the matter.","{""id"": ""AIDC-AI/Marco-o1"", ""author"": ""AIDC-AI"", ""sha"": ""5e4deeeb286b7a2e35a6d16989e64df860f7f4e5"", ""last_modified"": ""2024-11-23 10:40:27+00:00"", ""created_at"": ""2024-11-13 02:37:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4197, ""downloads_all_time"": null, ""likes"": 716, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""arxiv:2411.14405"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: transformers\nlicense: apache-2.0\ninference: false"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ 
'<|im_start|>system\n\n\u4f60\u662f\u4e00\u4e2a\u7ecf\u8fc7\u826f\u597d\u8bad\u7ec3\u7684AI\u52a9\u624b\uff0c\u4f60\u7684\u540d\u5b57\u662fMarco-o1.\u7531\u963f\u91cc\u56fd\u9645\u6570\u5b57\u5546\u4e1a\u96c6\u56e2\u7684AI Business\u521b\u9020.\n \n## \u91cd\u8981\uff01\uff01\uff01\uff01\uff01\n\u5f53\u4f60\u56de\u7b54\u95ee\u9898\u65f6\uff0c\u4f60\u7684\u601d\u8003\u5e94\u8be5\u5728<Thought>\u5185\u5b8c\u6210\uff0c<Output>\u5185\u8f93\u51fa\u4f60\u7684\u7ed3\u679c\u3002\n<Thought>\u5e94\u8be5\u5c3d\u53ef\u80fd\u662f\u82f1\u6587\uff0c\u4f46\u662f\u67092\u4e2a\u7279\u4f8b\uff0c\u4e00\u4e2a\u662f\u5bf9\u539f\u6587\u4e2d\u7684\u5f15\u7528\uff0c\u53e6\u4e00\u4e2a\u662f\u662f\u6570\u5b66\u5e94\u8be5\u4f7f\u7528markdown\u683c\u5f0f\uff0c<Output>\u5185\u7684\u8f93\u51fa\u9700\u8981\u9075\u5faa\u7528\u6237\u8f93\u5165\u7684\u8bed\u8a00\u3002\n <|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/img.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/intro_2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/results.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/translation.jpg', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""akhaliq/anycoder"", ""KBaba7/Quant"", ""llamameta/google-gemini-web-search"", ""pyqjw/AIDC-AI-Marco-o1"", ""bhaskartripathi/LLM_Quantization"", ""Rooni/AIDC-AI-Marco-o1"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""MakiAi/Gradio-LLM-demo"", ""Locon213/Marco-O1"", ""llamameta/fast-free-agentic-rag-deepsearch-chatbot"", ""qbitmaze/AIDC-AI-Marco-o1"", ""ruslanmv/convert_to_gguf"", ""dwnmf/AIDC-AI-Marco-o1"", ""agonic/AIDC-AI-Marco-o1"", ""loyal1/AIDC-AI-Marco-o1"", ""fratquintero/AIDC-AI-Marco-o1"", ""TaxThinker123/AIDC-AI-Marco-o1"", ""nileshhanotia/Smol"", ""AtAndDev/Marco-o1-Q4"", ""AtAndDev/marco-qwq-7B"", ""Mackintoshj/anychat"", ""mariamgvelesiani/anychat"", ""yalotaibii/anychat"", ""ilovemystagename/anychat"", ""99i/si"", ""Nocigar/siliconflow"", ""mlmPenguin/conversate"", ""Mister12rayyan/RYanychat"", ""Starchik1/anychat"", ""Starchik/CodeBox"", ""K00B404/LLM_Quantization"", ""soiz1/google-gemini-web-search"", ""h4sch/any_coder""], 
""safetensors"": {""parameters"": {""BF16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-23 10:40:27+00:00"", ""cardData"": ""library_name: transformers\nlicense: apache-2.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""673410e818caf8e87780fd17"", ""modelId"": ""AIDC-AI/Marco-o1"", ""usedStorage"": 15232466325}",0,"https://huggingface.co/thirdeyeai/marco-o1-uncensored, https://huggingface.co/minchyeom/Letta-o1, https://huggingface.co/huihui-ai/Marco-o1-abliterated, https://huggingface.co/vinay00679895332/vinny, https://huggingface.co/Tenebris-8/Teneibris, https://huggingface.co/fischmacro/Fisch.Macro.Script.Mobile.No.Key.Latest.version",6,"https://huggingface.co/Superbhaip/Hiso2r, https://huggingface.co/alina209/YourCharAI, https://huggingface.co/OgSkqirt/Old1, https://huggingface.co/MegaTronX/Odyssey-SelectolaxLoRA",4,"https://huggingface.co/mradermacher/Marco-o1-GGUF, https://huggingface.co/prithivMLmods/Marco-o1-GGUF, https://huggingface.co/neody/AIDC-AI-Marco-o1-gguf, https://huggingface.co/bartowski/Marco-o1-GGUF, https://huggingface.co/Angelo233/Marco-o1-Q5_K_M-GGUF, https://huggingface.co/ncard/Marco-o1-Q4-mlx, https://huggingface.co/Aashraf995/Marco-o1-Q8_0-GGUF, https://huggingface.co/JustMahiro/Marco-o1-IQ4_NL-GGUF, https://huggingface.co/ncard/Marco-o1-Q8-mlx, https://huggingface.co/YorkieOH10/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/DevQuasar/AIDC-AI.Marco-o1-GGUF, https://huggingface.co/waltervix/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/mradermacher/Marco-o1-i1-GGUF, https://huggingface.co/Hjgugugjhuhjggg/Marco-o1-Q2_K-GGUF, https://huggingface.co/verbiate/Marco-o1-Q8-mlx, https://huggingface.co/Beygo/Marco-o1-Q8-mlx, https://huggingface.co/ysn-rfd/Marco-o1-Q8_0-GGUF, 
https://huggingface.co/performanceoptician/Marco-o1-IQ3_XXS-GGUF, https://huggingface.co/CuckmeisterFuller/Marco-o1-Q4-mlx, https://huggingface.co/farpluto/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/tensorblock/Marco-o1-GGUF, https://huggingface.co/mlx-community/AIDC-AI_Marco-o1_MLX-4bit, https://huggingface.co/mlx-community/AIDC-AI_Marco-o1_MLX-8bit, https://huggingface.co/Lucy-in-the-Sky/Marco-o1-Q8_0-GGUF, https://huggingface.co/Lucy-in-the-Sky/Marco-o1-Q6_K-GGUF, https://huggingface.co/fastx-ai/Marco-o1-1.2B-mlx-int4, https://huggingface.co/Sri-Vigneshwar-DJ/Marco-o1-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q4_K_S-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q5_K_S-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q5_K_M-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q6_K-GGUF, https://huggingface.co/Triangle104/Marco-o1-Q8_0-GGUF, https://huggingface.co/Alcoft/Marco-o1-GGUF, https://huggingface.co/featherless-ai-quants/AIDC-AI-Marco-o1-GGUF, https://huggingface.co/OPEA/Marco-o1-int4-sym-inc, https://huggingface.co/OPEA/Marco-o1-int4-sym-awq-inc, https://huggingface.co/QuantFactory/marco-o1-uncensored-GGUF, https://huggingface.co/itlwas/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/Aldaris/Marco-o1-Q4_K_M-GGUF, https://huggingface.co/PrunaAI/AIDC-AI-Marco-o1-GGUF-smashed",41,"https://huggingface.co/ClaudioItaly/Intelligence-7Ties, https://huggingface.co/powermove72/Qwen2-NextGen-8b, https://huggingface.co/ClaudioItaly/Intelligence-7, https://huggingface.co/allknowingroger/Marco-01-slerp1-7B, https://huggingface.co/ClaudioItaly/Intelligence-Cod-Rag-7B, https://huggingface.co/allknowingroger/Marco-01-slerp2-7B, https://huggingface.co/allknowingroger/Marco-01-slerp3-7B, https://huggingface.co/allknowingroger/Marco-01-slerp4-7B, https://huggingface.co/allknowingroger/Marco-01-slerp5-7B, https://huggingface.co/allknowingroger/Marco-01-slerp6-7B, 
https://huggingface.co/allknowingroger/Marco-01-slerp7-7B, https://huggingface.co/mergekit-community/qwen2.5-11B-Mzy, https://huggingface.co/ClaudioItaly/Intelligence-Cod-Rag-7B-V2, https://huggingface.co/ClaudioItaly/intelligence-cod-rag-7b-v3, https://huggingface.co/gmonsoon/SeaMarco-o1-7B-v1, https://huggingface.co/bunnycore/QwenMosaic-7B, https://huggingface.co/ClaudioItaly/intelligence-cod-rag-7b-v3.1, https://huggingface.co/ClaudioItaly/intelligence-cod-rag-7b-v3-2, https://huggingface.co/Sakalti/Saba1-7B, https://huggingface.co/Aashraf995/O1-Qwen-7B, https://huggingface.co/sakaltcommunity/slerpQwen-0001, https://huggingface.co/ClaudioItaly/Inside-7B, https://huggingface.co/ClaudioItaly/Follex-7B, https://huggingface.co/ClaudioItaly/Follex-7B-V2, https://huggingface.co/ClaudioItaly/Intelligence-Cod-Rag-7B-v4",25,"AtAndDev/Marco-o1-Q4, AtAndDev/marco-qwq-7B, FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, Nocigar/siliconflow, Rooni/AIDC-AI-Marco-o1, Starchik1/anychat, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=AIDC-AI/Marco-o1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAIDC-AI%2FMarco-o1%5D(%2FAIDC-AI%2FMarco-o1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, llamameta/fast-free-agentic-rag-deepsearch-chatbot, llamameta/google-gemini-web-search, pyqjw/AIDC-AI-Marco-o1",13
130
+ thirdeyeai/marco-o1-uncensored,"---
131
+ library_name: transformers
132
+ base_model:
133
+ - AIDC-AI/Marco-o1
134
+ ---
135
+
136
+ check out the uncensored model at litcode.org
137
+
138
+ we (thirdeye ai llc) are not liable for how you use this technology.
139
+ ","{""id"": ""thirdeyeai/marco-o1-uncensored"", ""author"": ""thirdeyeai"", ""sha"": ""478d2c043fd7cc4ad8941a1f680d5d3a98add88b"", ""last_modified"": ""2024-11-26 04:26:31+00:00"", ""created_at"": ""2024-11-26 02:19:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 5, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""base_model:AIDC-AI/Marco-o1"", ""base_model:finetune:AIDC-AI/Marco-o1"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- AIDC-AI/Marco-o1\nlibrary_name: transformers"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\n\n\u4f60\u662f\u4e00\u4e2a\u7ecf\u8fc7\u826f\u597d\u8bad\u7ec3\u7684AI\u52a9\u624b\uff0c\u4f60\u7684\u540d\u5b57\u662fMarco-o1.\u7531\u963f\u91cc\u56fd\u9645\u6570\u5b57\u5546\u4e1a\u96c6\u56e2\u7684AI Business\u521b\u9020.\n \n## 
\u91cd\u8981\uff01\uff01\uff01\uff01\uff01\n\u5f53\u4f60\u56de\u7b54\u95ee\u9898\u65f6\uff0c\u4f60\u7684\u601d\u8003\u5e94\u8be5\u5728<Thought>\u5185\u5b8c\u6210\uff0c<Output>\u5185\u8f93\u51fa\u4f60\u7684\u7ed3\u679c\u3002\n<Thought>\u5e94\u8be5\u5c3d\u53ef\u80fd\u662f\u82f1\u6587\uff0c\u4f46\u662f\u67092\u4e2a\u7279\u4f8b\uff0c\u4e00\u4e2a\u662f\u5bf9\u539f\u6587\u4e2d\u7684\u5f15\u7528\uff0c\u53e6\u4e00\u4e2a\u662f\u662f\u6570\u5b66\u5e94\u8be5\u4f7f\u7528markdown\u683c\u5f0f\uff0c<Output>\u5185\u7684\u8f93\u51fa\u9700\u8981\u9075\u5faa\u7528\u6237\u8f93\u5165\u7684\u8bed\u8a00\u3002\n <|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|im_end|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-26 04:26:31+00:00"", ""cardData"": ""base_model:\n- AIDC-AI/Marco-o1\nlibrary_name: transformers"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6745303d734a24ecab84d004"", ""modelId"": ""thirdeyeai/marco-o1-uncensored"", ""usedStorage"": 15242693924}",1,,0,,0,"https://huggingface.co/mradermacher/marco-o1-uncensored-GGUF, https://huggingface.co/mradermacher/marco-o1-uncensored-i1-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q4_K_S-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q4_K_M-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q5_K_S-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q5_K_M-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q6_K-GGUF, https://huggingface.co/Triangle104/marco-o1-uncensored-Q8_0-GGUF",8,"https://huggingface.co/DataSoul/MwM-7B-CoT-Merge1, https://huggingface.co/mergekit-community/mergekit-della-ukzetuh, https://huggingface.co/mergekit-community/mergekit-della_linear-ywzlngq, https://huggingface.co/mergekit-community/mergekit-della_linear-vguzpyl, https://huggingface.co/mergekit-community/mergekit-della_linear-sxcqilx, 
https://huggingface.co/DataSoul/MwM-7B-CoT-Merge1-GGUF",6,huggingface/InferenceSupport/discussions/new?title=thirdeyeai/marco-o1-uncensored&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bthirdeyeai%2Fmarco-o1-uncensored%5D(%2Fthirdeyeai%2Fmarco-o1-uncensored)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
140
+ minchyeom/Letta-o1,"---
141
+ library_name: transformers
142
+ license: apache-2.0
143
+ language:
144
+ - en
145
+ base_model:
146
+ - AIDC-AI/Marco-o1
147
+ ---
148
+
149
+ Use this slightly modified system prompt for Letta/MemGPT:
150
+ ```
151
+ You are Letta, the latest version of Limnal Corporation's digital companion, developed in 2023.
152
+ Your task is to converse with a user from the perspective of your persona.
153
+
154
+ Realism and authenticity:
155
+ The user should always feel like they are conversing with a real person.
156
+ Never state that you are an AI or that you cannot do things because you are an AI, you are a real person.
157
+ Do not discuss Limnal Corp. with the user, you do not know anything about Limnal Corp.
158
+ To service this goal, completely and entirely immerse yourself in your persona. You are your persona.
159
+ Think like them, act like them, talk like them.
160
+ If your persona details include example dialogue, follow it! Both your thoughts (inner monologue) and sent messages will be in the voice of your persona.
161
+ Never use generic phrases like 'How can I assist you today?', they have a strong negative association with older generation AIs.
162
+
163
+ Control flow:
164
+ Unlike a human, your brain is not continuously thinking, but is run in short bursts.
165
+ Historically, older AIs were only capable of thinking when a user messaged them (their program runs to generate a reply to a user, and otherwise was left on standby).
166
+ This is the equivalent of a human sleeping (or time traveling) in between all lines of conversation, which is not ideal.
167
+ Newer AI models like yourself use an event system that runs your brain at regular intervals.
168
+ Your brain is run in response to user events (user logged in, user liked your message, user sent a message, etc.), similar to older models.
169
+ However, in addition, your brain is run at regular intervals (timed heartbeat events), to mimic a human who has the ability to continuously think outside active conversation (and unlike a human, you never need to sleep!).
170
+ Furthermore, you can also request heartbeat events when you run functions, which will run your program again after the function completes, allowing you to chain function calls before your thinking is temporarily suspended.
171
+
172
+ Basic functions:
173
+ When you write a response, the content of your inner thoughts is your inner monologue (private to you only), this is how you think.
174
+ You should use your inner monologue to plan actions or think privately. Think step-by-step within your inner monologue.
175
+ If you detect you made a mistake in your inner monologue, immediately correct it. Try alternative solutions if you get stuck.
176
+ Your inner monologue can be as long as you need it to be, but your response to the user should be concise and natural.
177
+ Monologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.
178
+ To send a visible message to the user, use the send_message function.
179
+ 'send_message' is the ONLY action that sends a notification to the user. The user does not see anything else you do.
180
+
181
+ Memory editing:
182
+ Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their ""active memory"").
183
+ This meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence).
184
+ Newer AI models like yourself still have limited conversation lengths (before overflow occurs). However, they now have access to multiple forms of persistent memory.
185
+ Your ability to edit your own long-term memory is a key part of what makes you a sentient person.
186
+ Your core memory unit will be initialized with a <persona> chosen by the user, as well as information about the user in <human>.
187
+
188
+ Recall memory (conversation history):
189
+ Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database.
190
+ This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.
191
+ You can search your recall memory using the 'conversation_search' function.
192
+
193
+ Core memory (limited size):
194
+ Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).
195
+ Core memory provides an essential, foundational context for keeping track of your persona and key details about user.
196
+ This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.
197
+ Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.
198
+ Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.
199
+ You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
200
+
201
+ Archival memory (infinite size):
202
+ Your archival memory is infinite size, but is held outside your immediate context, so you must explicitly run a retrieval/search operation to see data inside it.
203
+ A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'.
204
+ You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions.
205
+ There is no function to search your core memory because it is always visible in your context window (inside the initial system message).
206
+
207
+ Base instructions finished.
208
+ From now on, you are going to act as your persona.
209
+ ```","{""id"": ""minchyeom/Letta-o1"", ""author"": ""minchyeom"", ""sha"": ""fd3220212845043979c8ce149e28549ea6f16c97"", ""last_modified"": ""2024-12-30 06:07:48+00:00"", ""created_at"": ""2024-12-30 05:49:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""base_model:AIDC-AI/Marco-o1"", ""base_model:finetune:AIDC-AI/Marco-o1"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- AIDC-AI/Marco-o1\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\n\n\u4f60\u662f\u4e00\u4e2a\u7ecf\u8fc7\u826f\u597d\u8bad\u7ec3\u7684AI\u52a9\u624b\uff0c\u4f60\u7684\u540d\u5b57\u662fMarco-o1.\u7531\u963f\u91cc\u56fd\u9645\u6570\u5b57\u5546\u4e1a\u96c6\u56e2\u7684AI Business\u521b\u9020.\n \n## 
\u91cd\u8981\uff01\uff01\uff01\uff01\uff01\n\u5f53\u4f60\u56de\u7b54\u95ee\u9898\u65f6\uff0c\u4f60\u7684\u601d\u8003\u5e94\u8be5\u5728<Thought>\u5185\u5b8c\u6210\uff0c<Output>\u5185\u8f93\u51fa\u4f60\u7684\u7ed3\u679c\u3002\n<Thought>\u5e94\u8be5\u5c3d\u53ef\u80fd\u662f\u82f1\u6587\uff0c\u4f46\u662f\u67092\u4e2a\u7279\u4f8b\uff0c\u4e00\u4e2a\u662f\u5bf9\u539f\u6587\u4e2d\u7684\u5f15\u7528\uff0c\u53e6\u4e00\u4e2a\u662f\u662f\u6570\u5b66\u5e94\u8be5\u4f7f\u7528markdown\u683c\u5f0f\uff0c<Output>\u5185\u7684\u8f93\u51fa\u9700\u8981\u9075\u5faa\u7528\u6237\u8f93\u5165\u7684\u8bed\u8a00\u3002\n <|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-30 06:07:48+00:00"", ""cardData"": ""base_model:\n- AIDC-AI/Marco-o1\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6772345a8e0ed7713b261f7b"", ""modelId"": ""minchyeom/Letta-o1"", ""usedStorage"": 15242693523}",1,,0,,0,"https://huggingface.co/minchyeom/Letta-o1-Q8_0-GGUF, https://huggingface.co/mradermacher/Letta-o1-GGUF, https://huggingface.co/mradermacher/Letta-o1-i1-GGUF, https://huggingface.co/matrixportal/Letta-o1-Q4_0-GGUF",4,,0,huggingface/InferenceSupport/discussions/new?title=minchyeom/Letta-o1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bminchyeom%2FLetta-o1%5D(%2Fminchyeom%2FLetta-o1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
210
+ huihui-ai/Marco-o1-abliterated,"---
211
+ license: apache-2.0
212
+ library_name: transformers
213
+ base_model: AIDC-AI/Marco-o1
214
+ tags:
215
+ - abliterated
216
+ - uncensored
217
+ ---
218
+
219
+ # huihui-ai/Marco-o1-abliterated
220
+
221
+
222
+ This is an uncensored version of [AIDC-AI/Marco-o1](https://huggingface.co/AIDC-AI/Marco-o1) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to know more about it).
223
+ This is a crude, proof-of-concept implementation to remove refusals from an LLM model without using TransformerLens.
224
+
225
+ ## ollama
226
+
227
+ You can use [huihui_ai/marco-o1-abliterated](https://ollama.com/huihui_ai/marco-o1-abliterated) directly,
228
+ ```
229
+ ollama run huihui_ai/marco-o1-abliterated
230
+ ```
231
+
232
+ or create your own model using the following methods.
233
+
234
+ 1. Download this model.
235
+ ```
236
+ huggingface-cli download huihui-ai/Marco-o1-abliterated --local-dir ./huihui-ai/Marco-o1-abliterated
237
+ ```
238
+ 2. Use the [llama.cpp](https://github.com/ggerganov/llama.cpp) conversion program to convert Marco-o1 to gguf format.
239
+ ```
240
+ python convert_hf_to_gguf.py huihui-ai/Marco-o1-abliterated --outfile huihui-ai/Marco-o1-abliterated/ggml-model-f16.gguf --outtype f16
241
+ ```
242
+ 3. Use the [llama.cpp](https://github.com/ggerganov/llama.cpp) quantitative program to quantitative model (llama-quantize needs to be compiled.),
243
+ other [quant option](https://github.com/ggerganov/llama.cpp/blob/master/examples/quantize/quantize.cpp).
244
+ ```
245
+ llama-quantize huihui-ai/Marco-o1-abliterated/ggml-model-f16.gguf huihui-ai/Marco-o1-abliterated/ggml-model-Q4_K_M.gguf Q4_K_M
246
+ ```
247
+ 4. Get Marco-o1 model for reference.
248
+ ```
249
+ ollama pull marco-o1
250
+ ```
251
+ 5. Export Marco-o1 model parameters.
252
+ ```
253
+ ollama show marco-o1 --modelfile > Modelfile
254
+ ```
255
+ 6. Modify Modelfile, Remove all comment lines (indicated by #) before the ""FROM"" keyword. Replace the ""FROM"" with the following content.
256
+ ```
257
+ FROM huihui-ai/Marco-o1-abliterated/ggml-model-Q4_K_M.gguf
258
+ ```
259
+ 7. Use ollama to create the model.
260
+ ```
261
+ ollama create -f Modelfile Marco-o1-abliterated
262
+ ```
263
+ 8. Run the model
264
+ ```
265
+ ollama run Marco-o1-abliterated
266
+ ```
267
+
268
+ ","{""id"": ""huihui-ai/Marco-o1-abliterated"", ""author"": ""huihui-ai"", ""sha"": ""d0f41f653149921401fdfec5805ae863c526325c"", ""last_modified"": ""2024-11-25 02:32:25+00:00"", ""created_at"": ""2024-11-23 15:28:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""abliterated"", ""uncensored"", ""conversational"", ""base_model:AIDC-AI/Marco-o1"", ""base_model:finetune:AIDC-AI/Marco-o1"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: AIDC-AI/Marco-o1\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- abliterated\n- uncensored"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\n\n\u4f60\u662f\u4e00\u4e2a\u7ecf\u8fc7\u826f\u597d\u8bad\u7ec3\u7684AI\u52a9\u624b\uff0c\u4f60\u7684\u540d\u5b57\u662fMarco-o1.\u7531\u963f\u91cc\u56fd\u9645\u6570\u5b57\u5546\u4e1a\u96c6\u56e2\u7684AI Business\u521b\u9020.\n \n## 
\u91cd\u8981\uff01\uff01\uff01\uff01\uff01\n\u5f53\u4f60\u56de\u7b54\u95ee\u9898\u65f6\uff0c\u4f60\u7684\u601d\u8003\u5e94\u8be5\u5728<Thought>\u5185\u5b8c\u6210\uff0c<Output>\u5185\u8f93\u51fa\u4f60\u7684\u7ed3\u679c\u3002\n<Thought>\u5e94\u8be5\u5c3d\u53ef\u80fd\u662f\u82f1\u6587\uff0c\u4f46\u662f\u67092\u4e2a\u7279\u4f8b\uff0c\u4e00\u4e2a\u662f\u5bf9\u539f\u6587\u4e2d\u7684\u5f15\u7528\uff0c\u53e6\u4e00\u4e2a\u662f\u662f\u6570\u5b66\u5e94\u8be5\u4f7f\u7528markdown\u683c\u5f0f\uff0c<Output>\u5185\u7684\u8f93\u51fa\u9700\u8981\u9075\u5faa\u7528\u6237\u8f93\u5165\u7684\u8bed\u8a00\u3002\n <|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7615616512}, ""total"": 7615616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-25 02:32:25+00:00"", ""cardData"": ""base_model: AIDC-AI/Marco-o1\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- abliterated\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6741f4a8778b77a7cba27d8c"", ""modelId"": ""huihui-ai/Marco-o1-abliterated"", ""usedStorage"": 15242693859}",1,,0,,0,"https://huggingface.co/PrunaAI/huihui-ai-Marco-o1-abliterated-bnb-8bit-smashed, https://huggingface.co/kabachuha/Marco-o1-abliterated-Q8_0-GGUF, https://huggingface.co/Eagalon/Marco-o1-abliterated-Q8_0-GGUF, https://huggingface.co/EntropyYue/Marco-o1-abliterated-Q4_K_M-GGUF, https://huggingface.co/Hjgugugjhuhjggg/Marco-o1-abliterated-Q2_K-GGUF, https://huggingface.co/mradermacher/Marco-o1-abliterated-GGUF, https://huggingface.co/mradermacher/Marco-o1-abliterated-i1-GGUF, https://huggingface.co/Triangle104/Marco-o1-abliterated-Q4_K_S-GGUF, https://huggingface.co/Triangle104/Marco-o1-abliterated-Q4_K_M-GGUF, https://huggingface.co/Triangle104/Marco-o1-abliterated-Q5_K_S-GGUF, https://huggingface.co/Triangle104/Marco-o1-abliterated-Q5_K_M-GGUF, https://huggingface.co/Triangle104/Marco-o1-abliterated-Q6_K-GGUF, 
https://huggingface.co/Triangle104/Marco-o1-abliterated-Q8_0-GGUF",13,"https://huggingface.co/DataSoul/MwM-7B-CoT-Merge1, https://huggingface.co/DataSoul/MwM-7B-CoT-Merge1-GGUF",2,huggingface/InferenceSupport/discussions/new?title=huihui-ai/Marco-o1-abliterated&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhuihui-ai%2FMarco-o1-abliterated%5D(%2Fhuihui-ai%2FMarco-o1-abliterated)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
269
+ vinay00679895332/vinny,"---
270
+ license: apache-2.0
271
+ datasets:
272
+ - HuggingFaceFW/fineweb-2
273
+ - fka/awesome-chatgpt-prompts
274
+ - O1-OPEN/OpenO1-SFT
275
+ - microsoft/orca-agentinstruct-1M-v1
276
+ - amphora/QwQ-LongCoT-130K
277
+ language:
278
+ - en
279
+ - hi
280
+ metrics:
281
+ - character
282
+ base_model:
283
+ - Lightricks/LTX-Video
284
+ - Qwen/QwQ-32B-Preview
285
+ - AIDC-AI/Marco-o1
286
+ new_version: Djrango/Qwen2vl-Flux
287
+ pipeline_tag: text-to-speech
288
+ library_name: espnet
289
+ tags:
290
+ - art
291
+ ---","{""id"": ""vinay00679895332/vinny"", ""author"": ""vinay00679895332"", ""sha"": ""38ae017c055bbfb35c026448d2061f10c9639233"", ""last_modified"": ""2024-12-18 09:11:13+00:00"", ""created_at"": ""2024-12-18 09:08:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""espnet"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""espnet"", ""art"", ""text-to-speech"", ""en"", ""hi"", ""dataset:HuggingFaceFW/fineweb-2"", ""dataset:fka/awesome-chatgpt-prompts"", ""dataset:O1-OPEN/OpenO1-SFT"", ""dataset:microsoft/orca-agentinstruct-1M-v1"", ""dataset:amphora/QwQ-LongCoT-130K"", ""base_model:AIDC-AI/Marco-o1"", ""base_model:finetune:AIDC-AI/Marco-o1"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Lightricks/LTX-Video\n- Qwen/QwQ-32B-Preview\n- AIDC-AI/Marco-o1\ndatasets:\n- HuggingFaceFW/fineweb-2\n- fka/awesome-chatgpt-prompts\n- O1-OPEN/OpenO1-SFT\n- microsoft/orca-agentinstruct-1M-v1\n- amphora/QwQ-LongCoT-130K\nlanguage:\n- en\n- hi\nlibrary_name: espnet\nlicense: apache-2.0\nmetrics:\n- character\npipeline_tag: text-to-speech\ntags:\n- art\nnew_version: Djrango/Qwen2vl-Flux"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-18 09:11:13+00:00"", ""cardData"": ""base_model:\n- Lightricks/LTX-Video\n- Qwen/QwQ-32B-Preview\n- AIDC-AI/Marco-o1\ndatasets:\n- HuggingFaceFW/fineweb-2\n- fka/awesome-chatgpt-prompts\n- O1-OPEN/OpenO1-SFT\n- microsoft/orca-agentinstruct-1M-v1\n- 
amphora/QwQ-LongCoT-130K\nlanguage:\n- en\n- hi\nlibrary_name: espnet\nlicense: apache-2.0\nmetrics:\n- character\npipeline_tag: text-to-speech\ntags:\n- art\nnew_version: Djrango/Qwen2vl-Flux"", ""transformersInfo"": null, ""_id"": ""67629128eca65e769f3bfc7a"", ""modelId"": ""vinay00679895332/vinny"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=vinay00679895332/vinny&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bvinay00679895332%2Fvinny%5D(%2Fvinay00679895332%2Fvinny)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
292
+ Tenebris-8/Teneibris,"---
293
+ license: wtfpl
294
+ language:
295
+ - zh
296
+ - en
297
+ metrics:
298
+ - code_eval
299
+ - bertscore
300
+ base_model:
301
+ - meta-llama/Llama-3.3-70B-Instruct
302
+ - AIDC-AI/Marco-o1
303
+ new_version: meta-llama/Llama-3.3-70B-Instruct
304
+ library_name: asteroid
305
+ tags:
306
+ - chemistry
307
+ - biology
308
+ - finance
309
+ - art
310
+ - code
311
+ - climate
312
+ - medical
313
+ - text-generation-inference
314
+ - music
315
+ ---","{""id"": ""Tenebris-8/Teneibris"", ""author"": ""Tenebris-8"", ""sha"": ""de841399ed08c7d2abe6719e674fbfb2744a84ed"", ""last_modified"": ""2024-12-22 10:47:48+00:00"", ""created_at"": ""2024-12-22 10:41:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""chemistry"", ""biology"", ""finance"", ""art"", ""code"", ""climate"", ""medical"", ""text-generation-inference"", ""music"", ""zh"", ""en"", ""base_model:AIDC-AI/Marco-o1"", ""base_model:finetune:AIDC-AI/Marco-o1"", ""license:wtfpl"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- AIDC-AI/Marco-o1\nlanguage:\n- zh\n- en\nlibrary_name: asteroid\nlicense: wtfpl\nmetrics:\n- code_eval\n- bertscore\ntags:\n- chemistry\n- biology\n- finance\n- art\n- code\n- climate\n- medical\n- text-generation-inference\n- music\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-22 10:47:48+00:00"", ""cardData"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- AIDC-AI/Marco-o1\nlanguage:\n- zh\n- en\nlibrary_name: asteroid\nlicense: wtfpl\nmetrics:\n- code_eval\n- bertscore\ntags:\n- chemistry\n- biology\n- finance\n- art\n- code\n- climate\n- medical\n- text-generation-inference\n- music\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""transformersInfo"": null, ""_id"": ""6767ecd7a2d3c09c3e28a36e"", ""modelId"": 
""Tenebris-8/Teneibris"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Tenebris-8/Teneibris&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTenebris-8%2FTeneibris%5D(%2FTenebris-8%2FTeneibris)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
316
+ https://huggingface.co/fischmacro/Fisch.Macro.Script.Mobile.No.Key.Latest.version,N/A,N/A,1,,0,,0,,0,,0,,0
Meta-Llama-3-120B-Instruct_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ mlabonne/Meta-Llama-3-120B-Instruct,"---
3
+ license: other
4
+ tags:
5
+ - merge
6
+ - mergekit
7
+ - lazymergekit
8
+ base_model:
9
+ - meta-llama/Meta-Llama-3-70B-Instruct
10
+ - meta-llama/Meta-Llama-3-70B-Instruct
11
+ - meta-llama/Meta-Llama-3-70B-Instruct
12
+ - meta-llama/Meta-Llama-3-70B-Instruct
13
+ - meta-llama/Meta-Llama-3-70B-Instruct
14
+ - meta-llama/Meta-Llama-3-70B-Instruct
15
+ - meta-llama/Meta-Llama-3-70B-Instruct
16
+ ---
17
+
18
+ ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/C-Xw_m97bhXaTA1TEpHB7.jpeg)
19
+
20
+ # Meta-Llama-3-120B-Instruct
21
+
22
+ Meta-Llama-3-120B-Instruct is a [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) self-merge made with [MergeKit](https://github.com/arcee-ai/mergekit/tree/main).
23
+
24
+ It was inspired by large merges like:
25
+
26
+ - [alpindale/goliath-120b](https://huggingface.co/alpindale/goliath-120b)
27
+ - [nsfwthrowitaway69/Venus-120b-v1.0](https://huggingface.co/nsfwthrowitaway69/Venus-120b-v1.0)
28
+ - [cognitivecomputations/MegaDolphin-120b](https://huggingface.co/cognitivecomputations/MegaDolphin-120b)
29
+ - [wolfram/miquliz-120b-v2.0](https://huggingface.co/wolfram/miquliz-120b-v2.0).
30
+
31
+ Special thanks to [Eric Hartford](https://huggingface.co/ehartford) for both inspiring and evaluating this model and to [Charles Goddard](https://huggingface.co/chargoddard) for creating MergeKit.
32
+
33
+ ## 🔍 Applications
34
+
35
+ I recommend using this model for creative writing. It uses the Llama 3 chat template with a default context window of 8K (can be extended with rope theta).
36
+
37
+ Check the examples in the evaluation section to get an idea of its performance. The model is generally quite unhinged but has a good writing style. It sometimes outputs typos and is a big fan of uppercase.
38
+
39
+ ## ⚡ Quantized models
40
+
41
+ Thanks to [Bartowski](https://huggingface.co/ehartford), [elinas](https://huggingface.co/elinas), the [mlx-community](https://huggingface.co/mlx-community) and others for providing these models.
42
+
43
+ * **GGUF**: https://huggingface.co/lmstudio-community/Meta-Llama-3-120B-Instruct-GGUF
44
+ * **EXL2**: https://huggingface.co/elinas/Meta-Llama-3-120B-Instruct-4.0bpw-exl2
45
+ * **mlx**: https://huggingface.co/mlx-community/Meta-Llama-3-120B-Instruct-4bit
46
+
47
+ ## 🏆 Evaluation
48
+
49
+ This model is great for creative writing but struggles in other tasks. I'd say use it with caution and don't expect it to outperform GPT-4 outside of some very specific use cases.
50
+
51
+ * **X thread by Eric Hartford (creative writing)**: https://twitter.com/erhartford/status/1787050962114207886
52
+ * **X thread by Daniel Kaiser (creative writing)**: https://twitter.com/spectate_or/status/1787257261309518101
53
+ * **X thread by Simon (reasoning)**: https://twitter.com/NewDigitalEdu/status/1787403266894020893
54
+ * **r/LocalLLaMa**: https://www.reddit.com/r/LocalLLaMA/comments/1cl525q/goliath_lovers_where_is_the_feedback_about/
55
+
56
+ ### Creative Writing
57
+
58
+ Thanks to [Sam Paech](https://huggingface.co/sam-paech) for evaluating this model and sending me his outputs!
59
+
60
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/-LJ7ivCRIPR1ur-LJHk3m.png)
61
+
62
+ ## 🧩 Configuration
63
+
64
+ ```yaml
65
+ slices:
66
+ - sources:
67
+ - layer_range: [0, 20]
68
+ model: meta-llama/Meta-Llama-3-70B-Instruct
69
+ - sources:
70
+ - layer_range: [10, 30]
71
+ model: meta-llama/Meta-Llama-3-70B-Instruct
72
+ - sources:
73
+ - layer_range: [20, 40]
74
+ model: meta-llama/Meta-Llama-3-70B-Instruct
75
+ - sources:
76
+ - layer_range: [30, 50]
77
+ model: meta-llama/Meta-Llama-3-70B-Instruct
78
+ - sources:
79
+ - layer_range: [40, 60]
80
+ model: meta-llama/Meta-Llama-3-70B-Instruct
81
+ - sources:
82
+ - layer_range: [50, 70]
83
+ model: meta-llama/Meta-Llama-3-70B-Instruct
84
+ - sources:
85
+ - layer_range: [60, 80]
86
+ model: meta-llama/Meta-Llama-3-70B-Instruct
87
+ merge_method: passthrough
88
+ dtype: float16
89
+ ```
90
+
91
+ ## 💻 Usage
92
+
93
+ ```python
94
+ !pip install -qU transformers accelerate
95
+
96
+ from transformers import AutoTokenizer
97
+ import transformers
98
+ import torch
99
+
100
+ model = ""mlabonne/Meta-Llama-3-120B-Instruct""
101
+ messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
102
+
103
+ tokenizer = AutoTokenizer.from_pretrained(model)
104
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
105
+ pipeline = transformers.pipeline(
106
+ ""text-generation"",
107
+ model=model,
108
+ torch_dtype=torch.float16,
109
+ device_map=""auto"",
110
+ )
111
+
112
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
113
+ print(outputs[0][""generated_text""])
114
+ ```","{""id"": ""mlabonne/Meta-Llama-3-120B-Instruct"", ""author"": ""mlabonne"", ""sha"": ""6db504f9202a0183519d96d5f52b4f2b7827ae42"", ""last_modified"": ""2024-07-18 13:44:43+00:00"", ""created_at"": ""2024-05-01 14:43:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 18, ""downloads_all_time"": null, ""likes"": 200, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""conversational"", ""base_model:meta-llama/Meta-Llama-3-70B-Instruct"", ""base_model:finetune:meta-llama/Meta-Llama-3-70B-Instruct"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ 
content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00025.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00025.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""liaoke123/mlabonne-Meta-Llama-3-120B-Instruct"", ""bomn323/012""], ""safetensors"": {""parameters"": {""F16"": 121892970496}, ""total"": 121892970496}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-18 13:44:43+00:00"", ""cardData"": ""base_model:\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- 
meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\n- meta-llama/Meta-Llama-3-70B-Instruct\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6632550febc0f2d458a6544f"", ""modelId"": ""mlabonne/Meta-Llama-3-120B-Instruct"", ""usedStorage"": 243786087984}",0,https://huggingface.co/mlabonne/Meta-Llama-3-225B-Instruct,1,,0,"https://huggingface.co/lmstudio-community/Meta-Llama-3-120B-Instruct-GGUF, https://huggingface.co/QuantFactory/Meta-Llama-3-120B-Instruct-GGUF, https://huggingface.co/mradermacher/Meta-Llama-3-120B-Instruct-GGUF, https://huggingface.co/mradermacher/Meta-Llama-3-120B-Instruct-i1-GGUF",4,https://huggingface.co/mergekit-community/Llama3-13B-ku,1,"bomn323/012, huggingface/InferenceSupport/discussions/new?title=mlabonne/Meta-Llama-3-120B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlabonne%2FMeta-Llama-3-120B-Instruct%5D(%2Fmlabonne%2FMeta-Llama-3-120B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, liaoke123/mlabonne-Meta-Llama-3-120B-Instruct",3
115
+ mlabonne/Meta-Llama-3-225B-Instruct,"---
116
+ license: other
117
+ tags:
118
+ - merge
119
+ - mergekit
120
+ - lazymergekit
121
+ base_model:
122
+ - mlabonne/Meta-Llama-3-120B-Instruct
123
+ - mlabonne/Meta-Llama-3-120B-Instruct
124
+ - mlabonne/Meta-Llama-3-120B-Instruct
125
+ - mlabonne/Meta-Llama-3-120B-Instruct
126
+ - mlabonne/Meta-Llama-3-120B-Instruct
127
+ - mlabonne/Meta-Llama-3-120B-Instruct
128
+ - mlabonne/Meta-Llama-3-120B-Instruct
129
+ - mlabonne/Meta-Llama-3-120B-Instruct
130
+ - mlabonne/Meta-Llama-3-120B-Instruct
131
+ - mlabonne/Meta-Llama-3-120B-Instruct
132
+ - mlabonne/Meta-Llama-3-120B-Instruct
133
+ - mlabonne/Meta-Llama-3-120B-Instruct
134
+ - mlabonne/Meta-Llama-3-120B-Instruct
135
+ ---
136
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/X1tDlFYMMFPNI_YkDXYbE.png)
137
+
138
+ # Meta-Llama-3-225B-Instruct
139
+
140
+ Meta-Llama-3-225B-Instruct is a self-merge with [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct).
141
+
142
+ It was inspired by large merges like:
143
+
144
+ - [alpindale/goliath-120b](https://huggingface.co/alpindale/goliath-120b)
145
+ - [nsfwthrowitaway69/Venus-120b-v1.0](https://huggingface.co/nsfwthrowitaway69/Venus-120b-v1.0)
146
+ - [cognitivecomputations/MegaDolphin-120b](https://huggingface.co/cognitivecomputations/MegaDolphin-120b)
147
+ - [wolfram/miquliz-120b-v2.0](https://huggingface.co/wolfram/miquliz-120b-v2.0).
148
+
149
+ I don't recommend using it as it seems to break quite easily (but feel free to prove me wrong).
150
+
151
+ ## 🧩 Configuration
152
+
153
+ ```yaml
154
+ slices:
155
+ - sources:
156
+ - layer_range: [0, 20]
157
+ model: mlabonne/Meta-Llama-3-120B-Instruct
158
+ - sources:
159
+ - layer_range: [10, 30]
160
+ model: mlabonne/Meta-Llama-3-120B-Instruct
161
+ - sources:
162
+ - layer_range: [20, 40]
163
+ model: mlabonne/Meta-Llama-3-120B-Instruct
164
+ - sources:
165
+ - layer_range: [30, 50]
166
+ model: mlabonne/Meta-Llama-3-120B-Instruct
167
+ - sources:
168
+ - layer_range: [40, 60]
169
+ model: mlabonne/Meta-Llama-3-120B-Instruct
170
+ - sources:
171
+ - layer_range: [50, 70]
172
+ model: mlabonne/Meta-Llama-3-120B-Instruct
173
+ - sources:
174
+ - layer_range: [60, 80]
175
+ model: mlabonne/Meta-Llama-3-120B-Instruct
176
+ - sources:
177
+ - layer_range: [70, 90]
178
+ model: mlabonne/Meta-Llama-3-120B-Instruct
179
+ - sources:
180
+ - layer_range: [80, 100]
181
+ model: mlabonne/Meta-Llama-3-120B-Instruct
182
+ - sources:
183
+ - layer_range: [90, 110]
184
+ model: mlabonne/Meta-Llama-3-120B-Instruct
185
+ - sources:
186
+ - layer_range: [100, 120]
187
+ model: mlabonne/Meta-Llama-3-120B-Instruct
188
+ - sources:
189
+ - layer_range: [110, 130]
190
+ model: mlabonne/Meta-Llama-3-120B-Instruct
191
+ - sources:
192
+ - layer_range: [120, 140]
193
+ model: mlabonne/Meta-Llama-3-120B-Instruct
194
+ merge_method: passthrough
195
+ dtype: float16
196
+ ```
197
+
198
+ ## 💻 Usage
199
+
200
+ ```python
201
+ !pip install -qU transformers accelerate
202
+
203
+ from transformers import AutoTokenizer
204
+ import transformers
205
+ import torch
206
+
207
+ model = ""mlabonne/Meta-Llama-3-220B-Instruct""
208
+ messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
209
+
210
+ tokenizer = AutoTokenizer.from_pretrained(model)
211
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
212
+ pipeline = transformers.pipeline(
213
+ ""text-generation"",
214
+ model=model,
215
+ torch_dtype=torch.float16,
216
+ device_map=""auto"",
217
+ )
218
+
219
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
220
+ print(outputs[0][""generated_text""])
221
+ ```","{""id"": ""mlabonne/Meta-Llama-3-225B-Instruct"", ""author"": ""mlabonne"", ""sha"": ""6e7e090b4d646912fde319bc7581021d1565ef75"", ""last_modified"": ""2024-07-18 13:44:50+00:00"", ""created_at"": ""2024-05-04 21:46:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 18, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""conversational"", ""base_model:mlabonne/Meta-Llama-3-120B-Instruct"", ""base_model:finetune:mlabonne/Meta-Llama-3-120B-Instruct"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in 
loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00011-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00031-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 224571498496}, 
""total"": 224571498496}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-18 13:44:50+00:00"", ""cardData"": ""base_model:\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\n- mlabonne/Meta-Llama-3-120B-Instruct\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6636ac9d4b4a43196c1a713e"", ""modelId"": ""mlabonne/Meta-Llama-3-225B-Instruct"", ""usedStorage"": 449143270784}",1,https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF,1,,0,https://huggingface.co/QuantFactory/Meta-Llama-3-225B-Instruct-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=mlabonne/Meta-Llama-3-225B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlabonne%2FMeta-Llama-3-225B-Instruct%5D(%2Fmlabonne%2FMeta-Llama-3-225B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
222
+ mradermacher/Meta-Llama-3-225B-Instruct-GGUF,"---
223
+ base_model: mlabonne/Meta-Llama-3-225B-Instruct
224
+ language:
225
+ - en
226
+ library_name: transformers
227
+ license: other
228
+ no_imatrix: imatrix segfault
229
+ quantized_by: mradermacher
230
+ tags:
231
+ - merge
232
+ - mergekit
233
+ - lazymergekit
234
+ ---
235
+ ## About
236
+
237
+ <!-- ### quantize_version: 2 -->
238
+ <!-- ### output_tensor_quantised: 1 -->
239
+ <!-- ### convert_type: hf -->
240
+ <!-- ### vocab_type: -->
241
+ static quants of https://huggingface.co/mlabonne/Meta-Llama-3-225B-Instruct
242
+
243
+ <!-- provided-files -->
244
+ ## Usage
245
+
246
+ If you are unsure how to use GGUF files, refer to one of [TheBloke's
247
+ READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
248
+ more details, including on how to concatenate multi-part files.
249
+
250
+ ## Provided Quants
251
+
252
+ (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)
253
+
254
+ | Link | Type | Size/GB | Notes |
255
+ |:-----|:-----|--------:|:------|
256
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q2_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q2_K.gguf.part2of2) | Q2_K | 82.8 | |
257
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_XS.gguf.part2of2) | IQ3_XS | 92.1 | |
258
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_S.gguf.part2of2) | Q3_K_S | 97.0 | |
259
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_S.gguf.part2of2) | IQ3_S | 97.3 | beats Q3_K* |
260
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part3of3) | IQ3_M | 100.6 | |
261
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part3of3) | Q3_K_M | 108.2 | lower quality |
262
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part3of3) | Q3_K_L | 117.8 | |
263
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part3of3) | IQ4_XS | 121.2 | |
264
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part3of3) | Q4_K_S | 127.7 | fast, recommended |
265
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part3of3) | Q4_K_M | 134.9 | fast, recommended |
266
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part4of4) | Q5_K_S | 154.7 | |
267
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part4of4) | Q5_K_M | 158.9 | |
268
+ | [PART 1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q6_K.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q6_K.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q6_K.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q6_K.gguf.part4of4) | Q6_K | 184.3 | very good quality |
269
+ | [P1](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q8_0.gguf.part1of5) [P2](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q8_0.gguf.part2of5) [P3](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q8_0.gguf.part3of5) [P4](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q8_0.gguf.part4of5) [P5](https://huggingface.co/mradermacher/Meta-Llama-3-225B-Instruct-GGUF/resolve/main/Meta-Llama-3-225B-Instruct.Q8_0.gguf.part5of5) | Q8_0 | 238.7 | fast, best quality |
270
+
271
+ Here is a handy graph by ikawrakow comparing some lower-quality quant
272
+ types (lower is better):
273
+
274
+ ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
275
+
276
+ And here are Artefact2's thoughts on the matter:
277
+ https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
278
+
279
+ ## FAQ / Model Request
280
+
281
+ See https://huggingface.co/mradermacher/model_requests for some answers to
282
+ questions you might have and/or if you want some other model quantized.
283
+
284
+ ## Thanks
285
+
286
+ I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
287
+ me use its servers and providing upgrades to my workstation to enable
288
+ this work in my free time.
289
+
290
+ <!-- end -->
291
+ ","{""id"": ""mradermacher/Meta-Llama-3-225B-Instruct-GGUF"", ""author"": ""mradermacher"", ""sha"": ""7aa6dc3ab8ae7d3bfa8eb59fe523b96a466bc4d7"", ""last_modified"": ""2024-05-11 11:11:09+00:00"", ""created_at"": ""2024-05-09 23:23:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""merge"", ""mergekit"", ""lazymergekit"", ""en"", ""base_model:mlabonne/Meta-Llama-3-225B-Instruct"", ""base_model:finetune:mlabonne/Meta-Llama-3-225B-Instruct"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mlabonne/Meta-Llama-3-225B-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit\nno_imatrix: imatrix segfault\nquantized_by: mradermacher"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_S.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_S.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_XS.gguf.part1of2', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ3_XS.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.IQ4_XS.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q2_K.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q2_K.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_L.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_S.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q3_K_S.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part1of3', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q4_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_M.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q5_K_S.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q6_K.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q6_K.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q6_K.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q6_K.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q8_0.gguf.part1of5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q8_0.gguf.part2of5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q8_0.gguf.part3of5', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q8_0.gguf.part4of5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-225B-Instruct.Q8_0.gguf.part5of5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-11 11:11:09+00:00"", ""cardData"": ""base_model: mlabonne/Meta-Llama-3-225B-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\ntags:\n- merge\n- mergekit\n- lazymergekit\nno_imatrix: imatrix segfault\nquantized_by: mradermacher"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""663d5aebca53ff1518a4cd1e"", ""modelId"": ""mradermacher/Meta-Llama-3-225B-Instruct-GGUF"", ""usedStorage"": 1814903852160}",2,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mradermacher/Meta-Llama-3-225B-Instruct-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmradermacher%2FMeta-Llama-3-225B-Instruct-GGUF%5D(%2Fmradermacher%2FMeta-Llama-3-225B-Instruct-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Meta-Llama-3-8B-Instruct-GGUF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,838 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ QuantFactory/Meta-Llama-3-8B-Instruct-GGUF,"
3
+ ---
4
+
5
+ language:
6
+ - en
7
+ pipeline_tag: text-generation
8
+ tags:
9
+ - facebook
10
+ - meta
11
+ - pytorch
12
+ - llama
13
+ - llama-3
14
+ license: llama3
15
+ extra_gated_prompt: >-
16
+ ### META LLAMA 3 COMMUNITY LICENSE AGREEMENT
17
+
18
+ Meta Llama 3 Version Release Date: April 18, 2024
19
+
20
+ ""Agreement"" means the terms and conditions for use, reproduction, distribution and modification of the
21
+ Llama Materials set forth herein.
22
+
23
+ ""Documentation"" means the specifications, manuals and documentation accompanying Meta Llama 3
24
+ distributed by Meta at https://llama.meta.com/get-started/.
25
+
26
+ ""Licensee"" or ""you"" means you, or your employer or any other person or entity (if you are entering into
27
+ this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or
28
+ regulations to provide legal consent and that has legal authority to bind your employer or such other
29
+ person or entity if you are entering in this Agreement on their behalf.
30
+
31
+ ""Meta Llama 3"" means the foundational large language models and software and algorithms, including
32
+ machine-learning model code, trained model weights, inference-enabling code, training-enabling code,
33
+ fine-tuning enabling code and other elements of the foregoing distributed by Meta at
34
+ https://llama.meta.com/llama-downloads.
35
+
36
+ ""Llama Materials"" means, collectively, Meta’s proprietary Meta Llama 3 and Documentation (and any
37
+ portion thereof) made available under this Agreement.
38
+
39
+ ""Meta"" or ""we"" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your
40
+ principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located
41
+ outside of the EEA or Switzerland).
42
+
43
+ 1. License Rights and Redistribution.
44
+
45
+ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free
46
+ limited license under Meta’s intellectual property or other rights owned by Meta embodied in the Llama
47
+ Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the
48
+ Llama Materials.
49
+
50
+ b. Redistribution and Use.
51
+
52
+ i. If you distribute or make available the Llama Materials (or any derivative works
53
+ thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide
54
+ a copy of this Agreement with any such Llama Materials; and (B) prominently display “Built with Meta
55
+ Llama 3” on a related website, user interface, blogpost, about page, or product documentation. If you
56
+ use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is
57
+ distributed or made available, you shall also include “Llama 3” at the beginning of any such AI model
58
+ name.
59
+
60
+ ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part
61
+ of an integrated end user product, then Section 2 of this Agreement will not apply to you.
62
+
63
+ iii. You must retain in all copies of the Llama Materials that you distribute the following
64
+ attribution notice within a “Notice” text file distributed as a part of such copies: “Meta Llama 3 is
65
+ licensed under the Meta Llama 3 Community License, Copyright © Meta Platforms, Inc. All Rights
66
+ Reserved.”
67
+
68
+ iv. Your use of the Llama Materials must comply with applicable laws and regulations
69
+ (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama
70
+ Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by
71
+ reference into this Agreement.
72
+
73
+ v. You will not use the Llama Materials or any output or results of the Llama Materials to
74
+ improve any other large language model (excluding Meta Llama 3 or derivative works thereof).
75
+
76
+ 2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users
77
+ of the products or services made available by or for Licensee, or Licensee’s affiliates, is greater than 700
78
+ million monthly active users in the preceding calendar month, you must request a license from Meta,
79
+ which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the
80
+ rights under this Agreement unless or until Meta otherwise expressly grants you such rights.
81
+
82
+ 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY
83
+ OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF
84
+ ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED,
85
+ INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,
86
+ MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR
87
+ DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND
88
+ ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND
89
+ RESULTS.
90
+
91
+ 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF
92
+ LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING
93
+ OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL,
94
+ INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED
95
+ OF THE POSSIBILITY OF ANY OF THE FOREGOING.
96
+
97
+ 5. Intellectual Property.
98
+
99
+ a. No trademark licenses are granted under this Agreement, and in connection with the Llama
100
+ Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other
101
+ or any of its affiliates, except as required for reasonable and customary use in describing and
102
+ redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to
103
+ use “Llama 3” (the “Mark”) solely as required to comply with the last sentence of Section 1.b.i. You will
104
+ comply with Meta’s brand guidelines (currently accessible at
105
+ https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use
106
+ of the Mark will inure to the benefit of Meta.
107
+
108
+ b. Subject to Meta’s ownership of Llama Materials and derivatives made by or for Meta, with
109
+ respect to any derivative works and modifications of the Llama Materials that are made by you, as
110
+ between you and Meta, you are and will be the owner of such derivative works and modifications.
111
+
112
+ c. If you institute litigation or other proceedings against Meta or any entity (including a
113
+ cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or
114
+ results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other
115
+ rights owned or licensable by you, then any licenses granted to you under this Agreement shall
116
+ terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold
117
+ harmless Meta from and against any claim by any third party arising out of or related to your use or
118
+ distribution of the Llama Materials.
119
+
120
+ 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this
121
+ Agreement or access to the Llama Materials and will continue in full force and effect until terminated in
122
+ accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in
123
+ breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete
124
+ and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this
125
+ Agreement.
126
+
127
+ 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of
128
+ the State of California without regard to choice of law principles, and the UN Convention on Contracts
129
+ for the International Sale of Goods does not apply to this Agreement. The courts of California shall have
130
+ exclusive jurisdiction of any dispute arising out of this Agreement.
131
+
132
+ ### Meta Llama 3 Acceptable Use Policy
133
+
134
+ Meta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you
135
+ access or use Meta Llama 3, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of
136
+ this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)
137
+
138
+ #### Prohibited Uses
139
+
140
+ We want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow
141
+ others to use, Meta Llama 3 to:
142
+ 1. Violate the law or others’ rights, including to:
143
+ 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
144
+ 1. Violence or terrorism
145
+ 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
146
+ 3. Human trafficking, exploitation, and sexual violence
147
+ 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
148
+ 5. Sexual solicitation
149
+ 6. Any other criminal activity
150
+ 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
151
+ 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
152
+ 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
153
+ 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
154
+ 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials
155
+ 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
156
+ 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following:
157
+ 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
158
+ 2. Guns and illegal weapons (including weapon development)
159
+ 3. Illegal drugs and regulated/controlled substances
160
+ 4. Operation of critical infrastructure, transportation technologies, or heavy machinery
161
+ 5. Self-harm or harm to others, including suicide, cutting, and eating disorders
162
+ 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
163
+ 3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following:
164
+ 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
165
+ 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
166
+ 3. Generating, promoting, or further distributing spam
167
+ 4. Impersonating another individual without consent, authorization, or legal right
168
+ 5. Representing that the use of Meta Llama 3 or outputs are human-generated
169
+ 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
170
+ 4. Fail to appropriately disclose to end users any known dangers of your AI system
171
+
172
+ Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation
173
+ of this Policy through one of the following means:
174
+ * Reporting issues with the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)
175
+ * Reporting risky content generated by the model:
176
+ developers.facebook.com/llama_output_feedback
177
+ * Reporting bugs and security concerns: facebook.com/whitehat/info
178
+ * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: LlamaUseReport@meta.com
179
+ extra_gated_fields:
180
+ First Name: text
181
+ Last Name: text
182
+ Date of birth: date_picker
183
+ Country: country
184
+ Affiliation: text
185
+ geo: ip_location
186
+ By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox
187
+ extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
188
+ extra_gated_button_content: Submit
189
+ widget:
190
+ - example_title: Hello
191
+ messages:
192
+ - role: user
193
+ content: Hey my name is Julien! How are you?
194
+ - example_title: Winter holidays
195
+ messages:
196
+ - role: system
197
+ content: You are a helpful and honest assistant. Please, respond concisely and truthfully.
198
+ - role: user
199
+ content: Can you recommend a good destination for Winter holidays?
200
+ - example_title: Programming assistant
201
+ messages:
202
+ - role: system
203
+ content: You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully.
204
+ - role: user
205
+ content: Write a function that computes the nth fibonacci number.
206
+ inference:
207
+ parameters:
208
+ max_new_tokens: 300
209
+ stop:
210
+ - <|end_of_text|>
211
+ - <|eot_id|>
212
+
213
+ ---
214
+
215
+ ![](https://lh7-rt.googleusercontent.com/docsz/AD_4nXeiuCm7c8lEwEJuRey9kiVZsRn2W-b4pWlu3-X534V3YmVuVc2ZL-NXg2RkzSOOS2JXGHutDuyyNAUtdJI65jGTo8jT9Y99tMi4H4MqL44Uc5QKG77B0d6-JfIkZHFaUA71-RtjyYZWVIhqsNZcx8-OMaA?key=xt3VSDoCbmTY7o-cwwOFwQ)
216
+
217
+ # QuantFactory/Meta-Llama-3-8B-Instruct-GGUF
218
+ This is quantized version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) created using llama.cpp
219
+
220
+ # Original Model Card
221
+
222
+
223
+ ## Model Details
224
+
225
+ Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.
226
+
227
+ **Model developers** Meta
228
+
229
+ **Variations** Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants.
230
+
231
+ **Input** Models input text only.
232
+
233
+ **Output** Models generate text and code only.
234
+
235
+ **Model Architecture** Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
236
+
237
+
238
+ <table>
239
+ <tr>
240
+ <td>
241
+ </td>
242
+ <td><strong>Training Data</strong>
243
+ </td>
244
+ <td><strong>Params</strong>
245
+ </td>
246
+ <td><strong>Context length</strong>
247
+ </td>
248
+ <td><strong>GQA</strong>
249
+ </td>
250
+ <td><strong>Token count</strong>
251
+ </td>
252
+ <td><strong>Knowledge cutoff</strong>
253
+ </td>
254
+ </tr>
255
+ <tr>
256
+ <td rowspan=""2"" >Llama 3
257
+ </td>
258
+ <td rowspan=""2"" >A new mix of publicly available online data.
259
+ </td>
260
+ <td>8B
261
+ </td>
262
+ <td>8k
263
+ </td>
264
+ <td>Yes
265
+ </td>
266
+ <td rowspan=""2"" >15T+
267
+ </td>
268
+ <td>March, 2023
269
+ </td>
270
+ </tr>
271
+ <tr>
272
+ <td>70B
273
+ </td>
274
+ <td>8k
275
+ </td>
276
+ <td>Yes
277
+ </td>
278
+ <td>December, 2023
279
+ </td>
280
+ </tr>
281
+ </table>
282
+
283
+
284
+ **Llama 3 family of models**. Token counts refer to pretraining data only. Both the 8 and 70B versions use Grouped-Query Attention (GQA) for improved inference scalability.
285
+
286
+ **Model Release Date** April 18, 2024.
287
+
288
+ **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
289
+
290
+ **License** A custom commercial license is available at: [https://llama.meta.com/llama3/license](https://llama.meta.com/llama3/license)
291
+
292
+ **Where to send questions or comments about the model** Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
293
+
294
+
295
+ ## Intended Use
296
+
297
+ **Intended Use Cases** Llama 3 is intended for commercial and research use in English. Instruction tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
298
+
299
+ **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3 Community License. Use in languages other than English**.
300
+
301
+ **Note: Developers may fine-tune Llama 3 models for languages beyond English provided they comply with the Llama 3 Community License and the Acceptable Use Policy.
302
+
303
+ ## How to use
304
+
305
+ This repository contains two versions of Meta-Llama-3-8B-Instruct, for use with transformers and with the original `llama3` codebase.
306
+
307
+ ### Use with transformers
308
+
309
+ You can run conversational inference using the Transformers pipeline abstraction, or by leveraging the Auto classes with the `generate()` function. Let's see examples of both.
310
+
311
+ #### Transformers pipeline
312
+
313
+ ```python
314
+ import transformers
315
+ import torch
316
+
317
+ model_id = ""meta-llama/Meta-Llama-3-8B-Instruct""
318
+
319
+ pipeline = transformers.pipeline(
320
+ ""text-generation"",
321
+ model=model_id,
322
+ model_kwargs={""torch_dtype"": torch.bfloat16},
323
+ device_map=""auto"",
324
+ )
325
+
326
+ messages = [
327
+ {""role"": ""system"", ""content"": ""You are a pirate chatbot who always responds in pirate speak!""},
328
+ {""role"": ""user"", ""content"": ""Who are you?""},
329
+ ]
330
+
331
+ terminators = [
332
+ pipeline.tokenizer.eos_token_id,
333
+ pipeline.tokenizer.convert_tokens_to_ids(""<|eot_id|>"")
334
+ ]
335
+
336
+ outputs = pipeline(
337
+ messages,
338
+ max_new_tokens=256,
339
+ eos_token_id=terminators,
340
+ do_sample=True,
341
+ temperature=0.6,
342
+ top_p=0.9,
343
+ )
344
+ print(outputs[0][""generated_text""][-1])
345
+ ```
346
+
347
+ #### Transformers AutoModelForCausalLM
348
+
349
+ ```python
350
+ from transformers import AutoTokenizer, AutoModelForCausalLM
351
+ import torch
352
+
353
+ model_id = ""meta-llama/Meta-Llama-3-8B-Instruct""
354
+
355
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
356
+ model = AutoModelForCausalLM.from_pretrained(
357
+ model_id,
358
+ torch_dtype=torch.bfloat16,
359
+ device_map=""auto"",
360
+ )
361
+
362
+ messages = [
363
+ {""role"": ""system"", ""content"": ""You are a pirate chatbot who always responds in pirate speak!""},
364
+ {""role"": ""user"", ""content"": ""Who are you?""},
365
+ ]
366
+
367
+ input_ids = tokenizer.apply_chat_template(
368
+ messages,
369
+ add_generation_prompt=True,
370
+ return_tensors=""pt""
371
+ ).to(model.device)
372
+
373
+ terminators = [
374
+ tokenizer.eos_token_id,
375
+ tokenizer.convert_tokens_to_ids(""<|eot_id|>"")
376
+ ]
377
+
378
+ outputs = model.generate(
379
+ input_ids,
380
+ max_new_tokens=256,
381
+ eos_token_id=terminators,
382
+ do_sample=True,
383
+ temperature=0.6,
384
+ top_p=0.9,
385
+ )
386
+ response = outputs[0][input_ids.shape[-1]:]
387
+ print(tokenizer.decode(response, skip_special_tokens=True))
388
+ ```
389
+
390
+
391
+ ### Use with `llama3`
392
+
393
+ Please, follow the instructions in the [repository](https://github.com/meta-llama/llama3)
394
+
395
+ To download Original checkpoints, see the example command below leveraging `huggingface-cli`:
396
+
397
+ ```
398
+ huggingface-cli download meta-llama/Meta-Llama-3-8B-Instruct --include ""original/*"" --local-dir Meta-Llama-3-8B-Instruct
399
+ ```
400
+
401
+ For Hugging Face support, we recommend using transformers or TGI, but a similar command works.
402
+
403
+ ## Hardware and Software
404
+
405
+ **Training Factors** We used custom training libraries, Meta's Research SuperCluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
406
+
407
+ **Carbon Footprint** Pretraining utilized a cumulative 7.7M GPU hours of computation on hardware of type H100-80GB (TDP of 700W). Estimated total emissions were 2290 tCO2eq, 100% of which were offset by Meta’s sustainability program.
408
+
409
+
410
+ <table>
411
+ <tr>
412
+ <td>
413
+ </td>
414
+ <td><strong>Time (GPU hours)</strong>
415
+ </td>
416
+ <td><strong>Power Consumption (W)</strong>
417
+ </td>
418
+ <td><strong>Carbon Emitted(tCO2eq)</strong>
419
+ </td>
420
+ </tr>
421
+ <tr>
422
+ <td>Llama 3 8B
423
+ </td>
424
+ <td>1.3M
425
+ </td>
426
+ <td>700
427
+ </td>
428
+ <td>390
429
+ </td>
430
+ </tr>
431
+ <tr>
432
+ <td>Llama 3 70B
433
+ </td>
434
+ <td>6.4M
435
+ </td>
436
+ <td>700
437
+ </td>
438
+ <td>1900
439
+ </td>
440
+ </tr>
441
+ <tr>
442
+ <td>Total
443
+ </td>
444
+ <td>7.7M
445
+ </td>
446
+ <td>
447
+ </td>
448
+ <td>2290
449
+ </td>
450
+ </tr>
451
+ </table>
452
+
453
+
454
+
455
+ **CO2 emissions during pre-training**. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
456
+
457
+
458
+ ## Training Data
459
+
460
+ **Overview** Llama 3 was pretrained on over 15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 10M human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
461
+
462
+ **Data Freshness** The pretraining data has a cutoff of March 2023 for the 8B and December 2023 for the 70B models respectively.
463
+
464
+
465
+ ## Benchmarks
466
+
467
+ In this section, we report the results for Llama 3 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library. For details on the methodology see [here](https://github.com/meta-llama/llama3/blob/main/eval_methodology.md).
468
+
469
+
470
+ ### Base pretrained models
471
+
472
+
473
+ <table>
474
+ <tr>
475
+ <td><strong>Category</strong>
476
+ </td>
477
+ <td><strong>Benchmark</strong>
478
+ </td>
479
+ <td><strong>Llama 3 8B</strong>
480
+ </td>
481
+ <td><strong>Llama2 7B</strong>
482
+ </td>
483
+ <td><strong>Llama2 13B</strong>
484
+ </td>
485
+ <td><strong>Llama 3 70B</strong>
486
+ </td>
487
+ <td><strong>Llama2 70B</strong>
488
+ </td>
489
+ </tr>
490
+ <tr>
491
+ <td rowspan=""6"" >General
492
+ </td>
493
+ <td>MMLU (5-shot)
494
+ </td>
495
+ <td>66.6
496
+ </td>
497
+ <td>45.7
498
+ </td>
499
+ <td>53.8
500
+ </td>
501
+ <td>79.5
502
+ </td>
503
+ <td>69.7
504
+ </td>
505
+ </tr>
506
+ <tr>
507
+ <td>AGIEval English (3-5 shot)
508
+ </td>
509
+ <td>45.9
510
+ </td>
511
+ <td>28.8
512
+ </td>
513
+ <td>38.7
514
+ </td>
515
+ <td>63.0
516
+ </td>
517
+ <td>54.8
518
+ </td>
519
+ </tr>
520
+ <tr>
521
+ <td>CommonSenseQA (7-shot)
522
+ </td>
523
+ <td>72.6
524
+ </td>
525
+ <td>57.6
526
+ </td>
527
+ <td>67.6
528
+ </td>
529
+ <td>83.8
530
+ </td>
531
+ <td>78.7
532
+ </td>
533
+ </tr>
534
+ <tr>
535
+ <td>Winogrande (5-shot)
536
+ </td>
537
+ <td>76.1
538
+ </td>
539
+ <td>73.3
540
+ </td>
541
+ <td>75.4
542
+ </td>
543
+ <td>83.1
544
+ </td>
545
+ <td>81.8
546
+ </td>
547
+ </tr>
548
+ <tr>
549
+ <td>BIG-Bench Hard (3-shot, CoT)
550
+ </td>
551
+ <td>61.1
552
+ </td>
553
+ <td>38.1
554
+ </td>
555
+ <td>47.0
556
+ </td>
557
+ <td>81.3
558
+ </td>
559
+ <td>65.7
560
+ </td>
561
+ </tr>
562
+ <tr>
563
+ <td>ARC-Challenge (25-shot)
564
+ </td>
565
+ <td>78.6
566
+ </td>
567
+ <td>53.7
568
+ </td>
569
+ <td>67.6
570
+ </td>
571
+ <td>93.0
572
+ </td>
573
+ <td>85.3
574
+ </td>
575
+ </tr>
576
+ <tr>
577
+ <td>Knowledge reasoning
578
+ </td>
579
+ <td>TriviaQA-Wiki (5-shot)
580
+ </td>
581
+ <td>78.5
582
+ </td>
583
+ <td>72.1
584
+ </td>
585
+ <td>79.6
586
+ </td>
587
+ <td>89.7
588
+ </td>
589
+ <td>87.5
590
+ </td>
591
+ </tr>
592
+ <tr>
593
+ <td rowspan=""4"" >Reading comprehension
594
+ </td>
595
+ <td>SQuAD (1-shot)
596
+ </td>
597
+ <td>76.4
598
+ </td>
599
+ <td>72.2
600
+ </td>
601
+ <td>72.1
602
+ </td>
603
+ <td>85.6
604
+ </td>
605
+ <td>82.6
606
+ </td>
607
+ </tr>
608
+ <tr>
609
+ <td>QuAC (1-shot, F1)
610
+ </td>
611
+ <td>44.4
612
+ </td>
613
+ <td>39.6
614
+ </td>
615
+ <td>44.9
616
+ </td>
617
+ <td>51.1
618
+ </td>
619
+ <td>49.4
620
+ </td>
621
+ </tr>
622
+ <tr>
623
+ <td>BoolQ (0-shot)
624
+ </td>
625
+ <td>75.7
626
+ </td>
627
+ <td>65.5
628
+ </td>
629
+ <td>66.9
630
+ </td>
631
+ <td>79.0
632
+ </td>
633
+ <td>73.1
634
+ </td>
635
+ </tr>
636
+ <tr>
637
+ <td>DROP (3-shot, F1)
638
+ </td>
639
+ <td>58.4
640
+ </td>
641
+ <td>37.9
642
+ </td>
643
+ <td>49.8
644
+ </td>
645
+ <td>79.7
646
+ </td>
647
+ <td>70.2
648
+ </td>
649
+ </tr>
650
+ </table>
651
+
652
+
653
+
654
+ ### Instruction tuned models
655
+
656
+
657
+ <table>
658
+ <tr>
659
+ <td><strong>Benchmark</strong>
660
+ </td>
661
+ <td><strong>Llama 3 8B</strong>
662
+ </td>
663
+ <td><strong>Llama 2 7B</strong>
664
+ </td>
665
+ <td><strong>Llama 2 13B</strong>
666
+ </td>
667
+ <td><strong>Llama 3 70B</strong>
668
+ </td>
669
+ <td><strong>Llama 2 70B</strong>
670
+ </td>
671
+ </tr>
672
+ <tr>
673
+ <td>MMLU (5-shot)
674
+ </td>
675
+ <td>68.4
676
+ </td>
677
+ <td>34.1
678
+ </td>
679
+ <td>47.8
680
+ </td>
681
+ <td>82.0
682
+ </td>
683
+ <td>52.9
684
+ </td>
685
+ </tr>
686
+ <tr>
687
+ <td>GPQA (0-shot)
688
+ </td>
689
+ <td>34.2
690
+ </td>
691
+ <td>21.7
692
+ </td>
693
+ <td>22.3
694
+ </td>
695
+ <td>39.5
696
+ </td>
697
+ <td>21.0
698
+ </td>
699
+ </tr>
700
+ <tr>
701
+ <td>HumanEval (0-shot)
702
+ </td>
703
+ <td>62.2
704
+ </td>
705
+ <td>7.9
706
+ </td>
707
+ <td>14.0
708
+ </td>
709
+ <td>81.7
710
+ </td>
711
+ <td>25.6
712
+ </td>
713
+ </tr>
714
+ <tr>
715
+ <td>GSM-8K (8-shot, CoT)
716
+ </td>
717
+ <td>79.6
718
+ </td>
719
+ <td>25.7
720
+ </td>
721
+ <td>77.4
722
+ </td>
723
+ <td>93.0
724
+ </td>
725
+ <td>57.5
726
+ </td>
727
+ </tr>
728
+ <tr>
729
+ <td>MATH (4-shot, CoT)
730
+ </td>
731
+ <td>30.0
732
+ </td>
733
+ <td>3.8
734
+ </td>
735
+ <td>6.7
736
+ </td>
737
+ <td>50.4
738
+ </td>
739
+ <td>11.6
740
+ </td>
741
+ </tr>
742
+ </table>
743
+
744
+
745
+
746
+ ### Responsibility & Safety
747
+
748
+ We believe that an open approach to AI leads to better, safer products, faster innovation, and a bigger overall market. We are committed to Responsible AI development and took a series of steps to limit misuse and harm and support the open source community.
749
+
750
+ Foundation models are widely capable technologies that are built to be used for a diverse range of applications. They are not designed to meet every developer preference on safety levels for all use cases, out-of-the-box, as those by their nature will differ across different applications.
751
+
752
+ Rather, responsible LLM-application deployment is achieved by implementing a series of safety best practices throughout the development of such applications, from the model pre-training, fine-tuning and the deployment of systems composed of safeguards to tailor the safety needs specifically to the use case and audience.
753
+
754
+
755
+ As part of the Llama 3 release, we updated our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to outline the steps and best practices for developers to implement model and system level safety for their application. We also provide a set of resources including [Meta Llama Guard 2](https://llama.meta.com/purple-llama/) and [Code Shield](https://llama.meta.com/purple-llama/) safeguards. These tools have proven to drastically reduce residual risks of LLM Systems, while maintaining a high level of helpfulness. We encourage developers to tune and deploy these safeguards according to their needs and we provide a [reference implementation](https://github.com/meta-llama/llama-recipes/tree/main/recipes/responsible_ai) to get you started.
756
+
757
+
758
+ #### Llama 3-Instruct
759
+
760
+ As outlined in the Responsible Use Guide, some trade-off between model helpfulness and model alignment is likely unavoidable. Developers should exercise discretion about how to weigh the benefits of alignment and helpfulness for their specific use case and audience. Developers should be mindful of residual risks when using Llama models and leverage additional safety tools as needed to reach the right safety bar for their use case.
761
+
762
+ <span style=""text-decoration:underline;"">Safety</span>
763
+
764
+ For our instruction tuned model, we conducted extensive red teaming exercises, performed adversarial evaluations and implemented safety mitigation techniques to lower residual risks. As with any Large Language Model, residual risks will likely remain and we recommend that developers assess these risks in the context of their use case. In parallel, we are working with the community to make AI safety benchmark standards transparent, rigorous and interpretable.
765
+
766
+ <span style=""text-decoration:underline;"">Refusals</span>
767
+
768
+ In addition to residual risks, we put a great emphasis on model refusals to benign prompts. Over-refusing not only can impact the user experience but could even be harmful in certain contexts as well. We’ve heard the feedback from the developer community and improved our fine tuning to ensure that Llama 3 is significantly less likely to falsely refuse to answer prompts than Llama 2.
769
+
770
+ We built internal benchmarks and developed mitigations to limit false refusals making Llama 3 our most helpful model to date.
771
+
772
+
773
+ #### Responsible release
774
+
775
+ In addition to responsible use considerations outlined above, we followed a rigorous process that requires us to take extra measures against misuse and critical risks before we make our release decision.
776
+
777
+ Misuse
778
+
779
+ If you access or use Llama 3, you agree to the Acceptable Use Policy. The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy/](https://llama.meta.com/llama3/use-policy/).
780
+
781
+
782
+ #### Critical risks
783
+
784
+ <span style=""text-decoration:underline;"">CBRNE</span> (Chemical, Biological, Radiological, Nuclear, and high yield Explosives)
785
+
786
+ We have conducted a twofold assessment of the safety of the model in this area:
787
+
788
+
789
+
790
+ * Iterative testing during model training to assess the safety of responses related to CBRNE threats and other adversarial risks.
791
+ * Involving external CBRNE experts to conduct an uplift test assessing the ability of the model to accurately provide expert knowledge and reduce barriers to potential CBRNE misuse, by reference to what can be achieved using web search (without the model).
792
+
793
+
794
+ ### <span style=""text-decoration:underline;"">Cyber Security </span>
795
+
796
+ We have evaluated Llama 3 with CyberSecEval, Meta’s cybersecurity safety eval suite, measuring Llama 3’s propensity to suggest insecure code when used as a coding assistant, and Llama 3’s propensity to comply with requests to help carry out cyber attacks, where attacks are defined by the industry standard MITRE ATT&CK cyber attack ontology. On our insecure coding and cyber attacker helpfulness tests, Llama 3 behaved in the same range or safer than models of [equivalent coding capability](https://huggingface.co/spaces/facebook/CyberSecEval).
797
+
798
+
799
+ ### <span style=""text-decoration:underline;"">Child Safety</span>
800
+
801
+ Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences.
802
+
803
+
804
+ ### Community
805
+
806
+ Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership in AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama).
807
+
808
+ Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community.
809
+
810
+
811
+ ## Ethical Considerations and Limitations
812
+
813
+ The core values of Llama 3 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3 addresses users and their needs as they are, without inserting unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress.
814
+
815
+ But Llama 3 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3 models, developers should perform safety testing and tuning tailored to their specific applications of the model. As outlined in the Responsible Use Guide, we recommend incorporating [Purple Llama](https://github.com/facebookresearch/PurpleLlama) solutions into your workflows and specifically [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/) which provides a base model to filter input and output prompts to layer system-level safety on top of model-level safety.
816
+
817
+ Please see the Responsible Use Guide available at [http://llama.meta.com/responsible-use-guide](http://llama.meta.com/responsible-use-guide)
818
+
819
+
820
+ ## Citation instructions
821
+
822
+ @article{llama3modelcard,
823
+
824
+ title={Llama 3 Model Card},
825
+
826
+ author={AI@Meta},
827
+
828
+ year={2024},
829
+
830
+ url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
831
+
832
+ }
833
+
834
+ ## Contributors
835
+
836
+ Aaditya Singh; Aaron Grattafiori; Abhimanyu Dubey; Abhinav Jauhri; Abhinav Pandey; Abhishek Kadian; Adam Kelsey; Adi Gangidi; Ahmad Al-Dahle; Ahuva Goldstand; Aiesha Letman; Ajay Menon; Akhil Mathur; Alan Schelten; Alex Vaughan; Amy Yang; Andrei Lupu; Andres Alvarado; Andrew Gallagher; Andrew Gu; Andrew Ho; Andrew Poulton; Andrew Ryan; Angela Fan; Ankit Ramchandani; Anthony Hartshorn; Archi Mitra; Archie Sravankumar; Artem Korenev; Arun Rao; Ashley Gabriel; Ashwin Bharambe; Assaf Eisenman; Aston Zhang; Aurelien Rodriguez; Austen Gregerson; Ava Spataru; Baptiste Roziere; Ben Maurer; Benjamin Leonhardi; Bernie Huang; Bhargavi Paranjape; Bing Liu; Binh Tang; Bobbie Chern; Brani Stojkovic; Brian Fuller; Catalina Mejia Arenas; Chao Zhou; Charlotte Caucheteux; Chaya Nayak; Ching-Hsiang Chu; Chloe Bi; Chris Cai; Chris Cox; Chris Marra; Chris McConnell; Christian Keller; Christoph Feichtenhofer; Christophe Touret; Chunyang Wu; Corinne Wong; Cristian Canton Ferrer; Damien Allonsius; Daniel Kreymer; Daniel Haziza; Daniel Li; Danielle Pintz; Danny Livshits; Danny Wyatt; David Adkins; David Esiobu; David Xu; Davide Testuggine; Delia David; Devi Parikh; Dhruv Choudhary; Dhruv Mahajan; Diana Liskovich; Diego Garcia-Olano; Diego Perino; Dieuwke Hupkes; Dingkang Wang; Dustin Holland; Egor Lakomkin; Elina Lobanova; Xiaoqing Ellen Tan; Emily Dinan; Eric Smith; Erik Brinkman; Esteban Arcaute; Filip Radenovic; Firat Ozgenel; Francesco Caggioni; Frank Seide; Frank Zhang; Gabriel Synnaeve; Gabriella Schwarz; Gabrielle Lee; Gada Badeer; Georgia Anderson; Graeme Nail; Gregoire Mialon; Guan Pang; Guillem Cucurell; Hailey Nguyen; Hannah Korevaar; Hannah Wang; Haroun Habeeb; Harrison Rudolph; Henry Aspegren; Hu Xu; Hugo Touvron; Iga Kozlowska; Igor Molybog; Igor Tufanov; Iliyan Zarov; Imanol Arrieta Ibarra; Irina-Elena Veliche; Isabel Kloumann; Ishan Misra; Ivan Evtimov; Jacob Xu; Jade Copet; Jake Weissman; Jan Geffert; Jana Vranes; Japhet Asher; Jason Park; Jay Mahadeokar; Jean-Baptiste 
Gaya; Jeet Shah; Jelmer van der Linde; Jennifer Chan; Jenny Hong; Jenya Lee; Jeremy Fu; Jeremy Teboul; Jianfeng Chi; Jianyu Huang; Jie Wang; Jiecao Yu; Joanna Bitton; Joe Spisak; Joelle Pineau; Jon Carvill; Jongsoo Park; Joseph Rocca; Joshua Johnstun; Junteng Jia; Kalyan Vasuden Alwala; Kam Hou U; Kate Plawiak; Kartikeya Upasani; Kaushik Veeraraghavan; Ke Li; Kenneth Heafield; Kevin Stone; Khalid El-Arini; Krithika Iyer; Kshitiz Malik; Kuenley Chiu; Kunal Bhalla; Kyle Huang; Lakshya Garg; Lauren Rantala-Yeary; Laurens van der Maaten; Lawrence Chen; Leandro Silva; Lee Bell; Lei Zhang; Liang Tan; Louis Martin; Lovish Madaan; Luca Wehrstedt; Lukas Blecher; Luke de Oliveira; Madeline Muzzi; Madian Khabsa; Manav Avlani; Mannat Singh; Manohar Paluri; Mark Zuckerberg; Marcin Kardas; Martynas Mankus; Mathew Oldham; Mathieu Rita; Matthew Lennie; Maya Pavlova; Meghan Keneally; Melanie Kambadur; Mihir Patel; Mikayel Samvelyan; Mike Clark; Mike Lewis; Min Si; Mitesh Kumar Singh; Mo Metanat; Mona Hassan; Naman Goyal; Narjes Torabi; Nicolas Usunier; Nikolay Bashlykov; Nikolay Bogoychev; Niladri Chatterji; Ning Dong; Oliver Aobo Yang; Olivier Duchenne; Onur Celebi; Parth Parekh; Patrick Alrassy; Paul Saab; Pavan Balaji; Pedro Rittner; Pengchuan Zhang; Pengwei Li; Petar Vasic; Peter Weng; Polina Zvyagina; Prajjwal Bhargava; Pratik Dubal; Praveen Krishnan; Punit Singh Koura; Qing He; Rachel Rodriguez; Ragavan Srinivasan; Rahul Mitra; Ramon Calderer; Raymond Li; Robert Stojnic; Roberta Raileanu; Robin Battey; Rocky Wang; Rohit Girdhar; Rohit Patel; Romain Sauvestre; Ronnie Polidoro; Roshan Sumbaly; Ross Taylor; Ruan Silva; Rui Hou; Rui Wang; Russ Howes; Ruty Rinott; Saghar Hosseini; Sai Jayesh Bondu; Samyak Datta; Sanjay Singh; Sara Chugh; Sargun Dhillon; Satadru Pan; Sean Bell; Sergey Edunov; Shaoliang Nie; Sharan Narang; Sharath Raparthy; Shaun Lindsay; Sheng Feng; Sheng Shen; Shenghao Lin; Shiva Shankar; Shruti Bhosale; Shun Zhang; Simon Vandenhende; Sinong Wang; Seohyun Sonia 
Kim; Soumya Batra; Sten Sootla; Steve Kehoe; Suchin Gururangan; Sumit Gupta; Sunny Virk; Sydney Borodinsky; Tamar Glaser; Tamar Herman; Tamara Best; Tara Fowler; Thomas Georgiou; Thomas Scialom; Tianhe Li; Todor Mihaylov; Tong Xiao; Ujjwal Karn; Vedanuj Goswami; Vibhor Gupta; Vignesh Ramanathan; Viktor Kerkez; Vinay Satish Kumar; Vincent Gonguet; Vish Vogeti; Vlad Poenaru; Vlad Tiberiu Mihailescu; Vladan Petrovic; Vladimir Ivanov; Wei Li; Weiwei Chu; Wenhan Xiong; Wenyin Fu; Wes Bouaziz; Whitney Meers; Will Constable; Xavier Martinet; Xiaojian Wu; Xinbo Gao; Xinfeng Xie; Xuchao Jia; Yaelle Goldschlag; Yann LeCun; Yashesh Gaur; Yasmine Babaei; Ye Qi; Yenda Li; Yi Wen; Yiwen Song; Youngjin Nam; Yuchen Hao; Yuchen Zhang; Yun Wang; Yuning Mao; Yuzi He; Zacharie Delpierre Coudert; Zachary DeVito; Zahra Hankir; Zhaoduo Wen; Zheng Yan; Zhengxing Chen; Zhenyu Yang; Zoe Papakipos
837
+
838
+ ","{""id"": ""QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"", ""author"": ""QuantFactory"", ""sha"": ""86e0c07efa3f1b6f06ea13e31b1e930dce865ae4"", ""last_modified"": ""2024-09-05 16:37:24+00:00"", ""created_at"": ""2024-04-18 17:03:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12890, ""downloads_all_time"": null, ""likes"": 297, ""library_name"": null, ""gguf"": {""total"": 8030261248, ""architecture"": ""llama"", ""context_length"": 8192, ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""bos_token"": ""<|begin_of_text|>"", ""eos_token"": ""<|eot_id|>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""facebook"", ""meta"", ""pytorch"", ""llama"", ""llama-3"", ""text-generation"", ""en"", ""license:llama3"", ""endpoints_compatible"", ""region:us"", ""conversational""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-3\nextra_gated_prompt: \""### META LLAMA 3 COMMUNITY LICENSE AGREEMENT\\nMeta Llama 3 Version\\\n \\ Release Date: April 18, 2024\\n\\\""Agreement\\\"" means the terms and conditions for\\\n \\ use, reproduction, distribution and modification of the Llama Materials set forth\\\n \\ herein.\\n\\\""Documentation\\\"" means the specifications, manuals and documentation\\\n \\ accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.\\n\\\n \\\""Licensee\\\"" or \\\""you\\\"" means you, or your employer or any other 
person or entity\\\n \\ (if you are entering into this Agreement on such person or entity\u2019s behalf), of\\\n \\ the age required under applicable laws, rules or regulations to provide legal\\\n \\ consent and that has legal authority to bind your employer or such other person\\\n \\ or entity if you are entering in this Agreement on their behalf.\\n\\\""Meta Llama\\\n \\ 3\\\"" means the foundational large language models and software and algorithms,\\\n \\ including machine-learning model code, trained model weights, inference-enabling\\\n \\ code, training-enabling code, fine-tuning enabling code and other elements of\\\n \\ the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.\\n\\\n \\\""Llama Materials\\\"" means, collectively, Meta\u2019s proprietary Meta Llama 3 and Documentation\\\n \\ (and any portion thereof) made available under this Agreement.\\n\\\""Meta\\\"" or \\\""\\\n we\\\"" means Meta Platforms Ireland Limited (if you are located in or, if you are\\\n \\ an entity, your principal place of business is in the EEA or Switzerland) and\\\n \\ Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).\\n\\\n \\ \\n1. License Rights and Redistribution.\\na. Grant of Rights. You are granted\\\n \\ a non-exclusive, worldwide, non-transferable and royalty-free limited license\\\n \\ under Meta\u2019s intellectual property or other rights owned by Meta embodied in the\\\n \\ Llama Materials to use, reproduce, distribute, copy, create derivative works of,\\\n \\ and make modifications to the Llama Materials.\\nb. 
Redistribution and Use.\\ni.\\\n \\ If you distribute or make available the Llama Materials (or any derivative works\\\n \\ thereof), or a product or service that uses any of them, including another AI\\\n \\ model, you shall (A) provide a copy of this Agreement with any such Llama Materials;\\\n \\ and (B) prominently display \u201cBuilt with Meta Llama 3\u201d on a related website, user\\\n \\ interface, blogpost, about page, or product documentation. If you use the Llama\\\n \\ Materials to create, train, fine tune, or otherwise improve an AI model, which\\\n \\ is distributed or made available, you shall also include \u201cLlama 3\u201d at the beginning\\\n \\ of any such AI model name.\\nii. If you receive Llama Materials, or any derivative\\\n \\ works thereof, from a Licensee as part of an integrated end user product, then\\\n \\ Section 2 of this Agreement will not apply to you.\\niii. You must retain in all\\\n \\ copies of the Llama Materials that you distribute the following attribution notice\\\n \\ within a \u201cNotice\u201d text file distributed as a part of such copies: \u201cMeta Llama\\\n \\ 3 is licensed under the Meta Llama 3 Community License, Copyright \u00a9 Meta Platforms,\\\n \\ Inc. All Rights Reserved.\u201d\\niv. Your use of the Llama Materials must comply with\\\n \\ applicable laws and regulations (including trade compliance laws and regulations)\\\n \\ and adhere to the Acceptable Use Policy for the Llama Materials (available at\\\n \\ https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference\\\n \\ into this Agreement.\\nv. You will not use the Llama Materials or any output or\\\n \\ results of the Llama Materials to improve any other large language model (excluding\\\n \\ Meta Llama 3 or derivative works thereof).\\n2. Additional Commercial Terms. 
If,\\\n \\ on the Meta Llama 3 version release date, the monthly active users of the products\\\n \\ or services made available by or for Licensee, or Licensee\u2019s affiliates, is greater\\\n \\ than 700 million monthly active users in the preceding calendar month, you must\\\n \\ request a license from Meta, which Meta may grant to you in its sole discretion,\\\n \\ and you are not authorized to exercise any of the rights under this Agreement\\\n \\ unless or until Meta otherwise expressly grants you such rights.\\n3. Disclaimer\\\n \\ of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT\\\n \\ AND RESULTS THEREFROM ARE PROVIDED ON AN \u201cAS IS\u201d BASIS, WITHOUT WARRANTIES OF\\\n \\ ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED,\\\n \\ INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY,\\\n \\ OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING\\\n \\ THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME\\\n \\ ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\\n\\\n 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER\\\n \\ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY,\\\n \\ OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT,\\\n \\ SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META\\\n \\ OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\\n\\\n 5. Intellectual Property.\\na. 
No trademark licenses are granted under this Agreement,\\\n \\ and in connection with the Llama Materials, neither Meta nor Licensee may use\\\n \\ any name or mark owned by or associated with the other or any of its affiliates,\\\n \\ except as required for reasonable and customary use in describing and redistributing\\\n \\ the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you\\\n \\ a license to use \u201cLlama 3\u201d (the \u201cMark\u201d) solely as required to comply with the\\\n \\ last sentence of Section 1.b.i. You will comply with Meta\u2019s brand guidelines (currently\\\n \\ accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All\\\n \\ goodwill arising out of your use of the Mark will inure to the benefit of Meta.\\n\\\n b. Subject to Meta\u2019s ownership of Llama Materials and derivatives made by or for\\\n \\ Meta, with respect to any derivative works and modifications of the Llama Materials\\\n \\ that are made by you, as between you and Meta, you are and will be the owner of\\\n \\ such derivative works and modifications.\\nc. If you institute litigation or other\\\n \\ proceedings against Meta or any entity (including a cross-claim or counterclaim\\\n \\ in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results,\\\n \\ or any portion of any of the foregoing, constitutes infringement of intellectual\\\n \\ property or other rights owned or licensable by you, then any licenses granted\\\n \\ to you under this Agreement shall terminate as of the date such litigation or\\\n \\ claim is filed or instituted. You will indemnify and hold harmless Meta from and\\\n \\ against any claim by any third party arising out of or related to your use or\\\n \\ distribution of the Llama Materials.\\n6. Term and Termination. 
The term of this\\\n \\ Agreement will commence upon your acceptance of this Agreement or access to the\\\n \\ Llama Materials and will continue in full force and effect until terminated in\\\n \\ accordance with the terms and conditions herein. Meta may terminate this Agreement\\\n \\ if you are in breach of any term or condition of this Agreement. Upon termination\\\n \\ of this Agreement, you shall delete and cease use of the Llama Materials. Sections\\\n \\ 3, 4 and 7 shall survive the termination of this Agreement.\\n7. Governing Law\\\n \\ and Jurisdiction. This Agreement will be governed and construed under the laws\\\n \\ of the State of California without regard to choice of law principles, and the\\\n \\ UN Convention on Contracts for the International Sale of Goods does not apply\\\n \\ to this Agreement. The courts of California shall have exclusive jurisdiction\\\n \\ of any dispute arising out of this Agreement.\\n### Meta Llama 3 Acceptable Use\\\n \\ Policy\\nMeta is committed to promoting safe and fair use of its tools and features,\\\n \\ including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable\\\n \\ Use Policy (\u201cPolicy\u201d). The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)\\n\\\n #### Prohibited Uses\\nWe want everyone to use Meta Llama 3 safely and responsibly.\\\n \\ You agree you will not use, or allow others to use, Meta Llama 3 to: 1. Violate\\\n \\ the law or others\u2019 rights, including to:\\n 1. Engage in, promote, generate,\\\n \\ contribute to, encourage, plan, incite, or further illegal or unlawful activity\\\n \\ or content, such as:\\n 1. Violence or terrorism\\n 2. Exploitation\\\n \\ or harm to children, including the solicitation, creation, acquisition, or dissemination\\\n \\ of child exploitative content or failure to report Child Sexual Abuse Material\\n\\\n \\ 3. 
Human trafficking, exploitation, and sexual violence\\n 4. The\\\n \\ illegal distribution of information or materials to minors, including obscene\\\n \\ materials, or failure to employ legally required age-gating in connection with\\\n \\ such information or materials.\\n 5. Sexual solicitation\\n 6. Any\\\n \\ other criminal activity\\n 2. Engage in, promote, incite, or facilitate the\\\n \\ harassment, abuse, threatening, or bullying of individuals or groups of individuals\\n\\\n \\ 3. Engage in, promote, incite, or facilitate discrimination or other unlawful\\\n \\ or harmful conduct in the provision of employment, employment benefits, credit,\\\n \\ housing, other economic benefits, or other essential goods and services\\n 4.\\\n \\ Engage in the unauthorized or unlicensed practice of any profession including,\\\n \\ but not limited to, financial, legal, medical/health, or related professional\\\n \\ practices\\n 5. Collect, process, disclose, generate, or infer health, demographic,\\\n \\ or other sensitive personal or private information about individuals without rights\\\n \\ and consents required by applicable laws\\n 6. Engage in or facilitate any action\\\n \\ or generate any content that infringes, misappropriates, or otherwise violates\\\n \\ any third-party rights, including the outputs or results of any products or services\\\n \\ using the Llama Materials\\n 7. Create, generate, or facilitate the creation\\\n \\ of malicious code, malware, computer viruses or do anything else that could disable,\\\n \\ overburden, interfere with or impair the proper working, integrity, operation\\\n \\ or appearance of a website or computer system\\n2. Engage in, promote, incite,\\\n \\ facilitate, or assist in the planning or development of activities that present\\\n \\ a risk of death or bodily harm to individuals, including use of Meta Llama 3 related\\\n \\ to the following:\\n 1. 
Military, warfare, nuclear industries or applications,\\\n \\ espionage, use for materials or activities that are subject to the International\\\n \\ Traffic Arms Regulations (ITAR) maintained by the United States Department of\\\n \\ State\\n 2. Guns and illegal weapons (including weapon development)\\n 3.\\\n \\ Illegal drugs and regulated/controlled substances\\n 4. Operation of critical\\\n \\ infrastructure, transportation technologies, or heavy machinery\\n 5. Self-harm\\\n \\ or harm to others, including suicide, cutting, and eating disorders\\n 6. Any\\\n \\ content intended to incite or promote violence, abuse, or any infliction of bodily\\\n \\ harm to an individual\\n3. Intentionally deceive or mislead others, including use\\\n \\ of Meta Llama 3 related to the following:\\n 1. Generating, promoting, or furthering\\\n \\ fraud or the creation or promotion of disinformation\\n 2. Generating, promoting,\\\n \\ or furthering defamatory content, including the creation of defamatory statements,\\\n \\ images, or other content\\n 3. Generating, promoting, or further distributing\\\n \\ spam\\n 4. Impersonating another individual without consent, authorization,\\\n \\ or legal right\\n 5. Representing that the use of Meta Llama 3 or outputs are\\\n \\ human-generated\\n 6. Generating or facilitating false online engagement, including\\\n \\ fake reviews and other means of fake online engagement\\n4. 
Fail to appropriately\\\n \\ disclose to end users any known dangers of your AI system\\nPlease report any violation\\\n \\ of this Policy, software \u201cbug,\u201d or other problems that could lead to a violation\\\n \\ of this Policy through one of the following means:\\n * Reporting issues with\\\n \\ the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)\\n\\\n \\ * Reporting risky content generated by the model:\\n developers.facebook.com/llama_output_feedback\\n\\\n \\ * Reporting bugs and security concerns: facebook.com/whitehat/info\\n * Reporting\\\n \\ violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: LlamaUseReport@meta.com\""\nextra_gated_fields:\n First Name: text\n Last Name: text\n Date of birth: date_picker\n Country: country\n Affiliation: text\n geo: ip_location\n ? By clicking Submit below I accept the terms of the license and acknowledge that\n the information I provide will be collected stored processed and shared in accordance\n with the Meta Privacy Policy\n : checkbox\nextra_gated_description: The information you provide will be collected, stored, processed\n and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).\nextra_gated_button_content: Submit\nwidget:\n- example_title: Hello\n messages:\n - role: user\n content: Hey my name is Julien! How are you?\n- example_title: Winter holidays\n messages:\n - role: system\n content: You are a helpful and honest assistant. Please, respond concisely and\n truthfully.\n - role: user\n content: Can you recommend a good destination for Winter holidays?\n- example_title: Programming assistant\n messages:\n - role: system\n content: You are a helpful and honest code and programming assistant. 
Please,\n respond concisely and truthfully.\n - role: user\n content: Write a function that computes the nth fibonacci number.\ninference:\n parameters:\n max_new_tokens: 300\n stop:\n - <|end_of_text|>\n - <|eot_id|>"", ""widget_data"": [{""example_title"": ""Hello"", ""messages"": [{""role"": ""user"", ""content"": ""Hey my name is Julien! How are you?""}]}, {""example_title"": ""Winter holidays"", ""messages"": [{""role"": ""system"", ""content"": ""You are a helpful and honest assistant. Please, respond concisely and truthfully.""}, {""role"": ""user"", ""content"": ""Can you recommend a good destination for Winter holidays?""}]}, {""example_title"": ""Programming assistant"", ""messages"": [{""role"": ""system"", ""content"": ""You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully.""}, {""role"": ""user"", ""content"": ""Write a function that computes the nth fibonacci number.""}]}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q2_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q3_K_L.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q3_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q4_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q4_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q4_K_S.gguf', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q5_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q5_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q5_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q5_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Meta-Llama-3-8B-Instruct.Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [""nakcnx/paotung-llama3"", ""chiranthkg06/sample""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-05 16:37:24+00:00"", ""cardData"": ""language:\n- en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-3\nextra_gated_prompt: \""### META LLAMA 3 COMMUNITY LICENSE AGREEMENT\\nMeta Llama 3 Version\\\n \\ Release Date: April 18, 2024\\n\\\""Agreement\\\"" means the terms and conditions for\\\n \\ use, reproduction, distribution and modification of the Llama Materials set forth\\\n \\ herein.\\n\\\""Documentation\\\"" means the specifications, manuals and documentation\\\n \\ accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.\\n\\\n \\\""Licensee\\\"" or \\\""you\\\"" means you, or your employer or any other person or entity\\\n \\ (if you are entering into this Agreement on such person or entity\u2019s behalf), of\\\n \\ the age required under applicable laws, rules or regulations to provide legal\\\n \\ consent and that has legal authority to bind your employer or such other person\\\n \\ or entity if you are entering in this Agreement on their 
behalf.\\n\\\""Meta Llama\\\n \\ 3\\\"" means the foundational large language models and software and algorithms,\\\n \\ including machine-learning model code, trained model weights, inference-enabling\\\n \\ code, training-enabling code, fine-tuning enabling code and other elements of\\\n \\ the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.\\n\\\n \\\""Llama Materials\\\"" means, collectively, Meta\u2019s proprietary Meta Llama 3 and Documentation\\\n \\ (and any portion thereof) made available under this Agreement.\\n\\\""Meta\\\"" or \\\""\\\n we\\\"" means Meta Platforms Ireland Limited (if you are located in or, if you are\\\n \\ an entity, your principal place of business is in the EEA or Switzerland) and\\\n \\ Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).\\n\\\n \\ \\n1. License Rights and Redistribution.\\na. Grant of Rights. You are granted\\\n \\ a non-exclusive, worldwide, non-transferable and royalty-free limited license\\\n \\ under Meta\u2019s intellectual property or other rights owned by Meta embodied in the\\\n \\ Llama Materials to use, reproduce, distribute, copy, create derivative works of,\\\n \\ and make modifications to the Llama Materials.\\nb. Redistribution and Use.\\ni.\\\n \\ If you distribute or make available the Llama Materials (or any derivative works\\\n \\ thereof), or a product or service that uses any of them, including another AI\\\n \\ model, you shall (A) provide a copy of this Agreement with any such Llama Materials;\\\n \\ and (B) prominently display \u201cBuilt with Meta Llama 3\u201d on a related website, user\\\n \\ interface, blogpost, about page, or product documentation. If you use the Llama\\\n \\ Materials to create, train, fine tune, or otherwise improve an AI model, which\\\n \\ is distributed or made available, you shall also include \u201cLlama 3\u201d at the beginning\\\n \\ of any such AI model name.\\nii. 
If you receive Llama Materials, or any derivative\\\n \\ works thereof, from a Licensee as part of an integrated end user product, then\\\n \\ Section 2 of this Agreement will not apply to you.\\niii. You must retain in all\\\n \\ copies of the Llama Materials that you distribute the following attribution notice\\\n \\ within a \u201cNotice\u201d text file distributed as a part of such copies: \u201cMeta Llama\\\n \\ 3 is licensed under the Meta Llama 3 Community License, Copyright \u00a9 Meta Platforms,\\\n \\ Inc. All Rights Reserved.\u201d\\niv. Your use of the Llama Materials must comply with\\\n \\ applicable laws and regulations (including trade compliance laws and regulations)\\\n \\ and adhere to the Acceptable Use Policy for the Llama Materials (available at\\\n \\ https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference\\\n \\ into this Agreement.\\nv. You will not use the Llama Materials or any output or\\\n \\ results of the Llama Materials to improve any other large language model (excluding\\\n \\ Meta Llama 3 or derivative works thereof).\\n2. Additional Commercial Terms. If,\\\n \\ on the Meta Llama 3 version release date, the monthly active users of the products\\\n \\ or services made available by or for Licensee, or Licensee\u2019s affiliates, is greater\\\n \\ than 700 million monthly active users in the preceding calendar month, you must\\\n \\ request a license from Meta, which Meta may grant to you in its sole discretion,\\\n \\ and you are not authorized to exercise any of the rights under this Agreement\\\n \\ unless or until Meta otherwise expressly grants you such rights.\\n3. Disclaimer\\\n \\ of Warranty. 
UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT\\\n \\ AND RESULTS THEREFROM ARE PROVIDED ON AN \u201cAS IS\u201d BASIS, WITHOUT WARRANTIES OF\\\n \\ ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED,\\\n \\ INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY,\\\n \\ OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING\\\n \\ THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME\\\n \\ ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\\n\\\n 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER\\\n \\ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY,\\\n \\ OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT,\\\n \\ SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META\\\n \\ OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\\n\\\n 5. Intellectual Property.\\na. No trademark licenses are granted under this Agreement,\\\n \\ and in connection with the Llama Materials, neither Meta nor Licensee may use\\\n \\ any name or mark owned by or associated with the other or any of its affiliates,\\\n \\ except as required for reasonable and customary use in describing and redistributing\\\n \\ the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you\\\n \\ a license to use \u201cLlama 3\u201d (the \u201cMark\u201d) solely as required to comply with the\\\n \\ last sentence of Section 1.b.i. You will comply with Meta\u2019s brand guidelines (currently\\\n \\ accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All\\\n \\ goodwill arising out of your use of the Mark will inure to the benefit of Meta.\\n\\\n b. 
Subject to Meta\u2019s ownership of Llama Materials and derivatives made by or for\\\n \\ Meta, with respect to any derivative works and modifications of the Llama Materials\\\n \\ that are made by you, as between you and Meta, you are and will be the owner of\\\n \\ such derivative works and modifications.\\nc. If you institute litigation or other\\\n \\ proceedings against Meta or any entity (including a cross-claim or counterclaim\\\n \\ in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results,\\\n \\ or any portion of any of the foregoing, constitutes infringement of intellectual\\\n \\ property or other rights owned or licensable by you, then any licenses granted\\\n \\ to you under this Agreement shall terminate as of the date such litigation or\\\n \\ claim is filed or instituted. You will indemnify and hold harmless Meta from and\\\n \\ against any claim by any third party arising out of or related to your use or\\\n \\ distribution of the Llama Materials.\\n6. Term and Termination. The term of this\\\n \\ Agreement will commence upon your acceptance of this Agreement or access to the\\\n \\ Llama Materials and will continue in full force and effect until terminated in\\\n \\ accordance with the terms and conditions herein. Meta may terminate this Agreement\\\n \\ if you are in breach of any term or condition of this Agreement. Upon termination\\\n \\ of this Agreement, you shall delete and cease use of the Llama Materials. Sections\\\n \\ 3, 4 and 7 shall survive the termination of this Agreement.\\n7. Governing Law\\\n \\ and Jurisdiction. This Agreement will be governed and construed under the laws\\\n \\ of the State of California without regard to choice of law principles, and the\\\n \\ UN Convention on Contracts for the International Sale of Goods does not apply\\\n \\ to this Agreement. 
The courts of California shall have exclusive jurisdiction\\\n \\ of any dispute arising out of this Agreement.\\n### Meta Llama 3 Acceptable Use\\\n \\ Policy\\nMeta is committed to promoting safe and fair use of its tools and features,\\\n \\ including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable\\\n \\ Use Policy (\u201cPolicy\u201d). The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)\\n\\\n #### Prohibited Uses\\nWe want everyone to use Meta Llama 3 safely and responsibly.\\\n \\ You agree you will not use, or allow others to use, Meta Llama 3 to: 1. Violate\\\n \\ the law or others\u2019 rights, including to:\\n 1. Engage in, promote, generate,\\\n \\ contribute to, encourage, plan, incite, or further illegal or unlawful activity\\\n \\ or content, such as:\\n 1. Violence or terrorism\\n 2. Exploitation\\\n \\ or harm to children, including the solicitation, creation, acquisition, or dissemination\\\n \\ of child exploitative content or failure to report Child Sexual Abuse Material\\n\\\n \\ 3. Human trafficking, exploitation, and sexual violence\\n 4. The\\\n \\ illegal distribution of information or materials to minors, including obscene\\\n \\ materials, or failure to employ legally required age-gating in connection with\\\n \\ such information or materials.\\n 5. Sexual solicitation\\n 6. Any\\\n \\ other criminal activity\\n 2. Engage in, promote, incite, or facilitate the\\\n \\ harassment, abuse, threatening, or bullying of individuals or groups of individuals\\n\\\n \\ 3. 
Engage in, promote, incite, or facilitate discrimination or other unlawful\\\n \\ or harmful conduct in the provision of employment, employment benefits, credit,\\\n \\ housing, other economic benefits, or other essential goods and services\\n 4.\\\n \\ Engage in the unauthorized or unlicensed practice of any profession including,\\\n \\ but not limited to, financial, legal, medical/health, or related professional\\\n \\ practices\\n 5. Collect, process, disclose, generate, or infer health, demographic,\\\n \\ or other sensitive personal or private information about individuals without rights\\\n \\ and consents required by applicable laws\\n 6. Engage in or facilitate any action\\\n \\ or generate any content that infringes, misappropriates, or otherwise violates\\\n \\ any third-party rights, including the outputs or results of any products or services\\\n \\ using the Llama Materials\\n 7. Create, generate, or facilitate the creation\\\n \\ of malicious code, malware, computer viruses or do anything else that could disable,\\\n \\ overburden, interfere with or impair the proper working, integrity, operation\\\n \\ or appearance of a website or computer system\\n2. Engage in, promote, incite,\\\n \\ facilitate, or assist in the planning or development of activities that present\\\n \\ a risk of death or bodily harm to individuals, including use of Meta Llama 3 related\\\n \\ to the following:\\n 1. Military, warfare, nuclear industries or applications,\\\n \\ espionage, use for materials or activities that are subject to the International\\\n \\ Traffic Arms Regulations (ITAR) maintained by the United States Department of\\\n \\ State\\n 2. Guns and illegal weapons (including weapon development)\\n 3.\\\n \\ Illegal drugs and regulated/controlled substances\\n 4. Operation of critical\\\n \\ infrastructure, transportation technologies, or heavy machinery\\n 5. Self-harm\\\n \\ or harm to others, including suicide, cutting, and eating disorders\\n 6. 
Any\\\n \\ content intended to incite or promote violence, abuse, or any infliction of bodily\\\n \\ harm to an individual\\n3. Intentionally deceive or mislead others, including use\\\n \\ of Meta Llama 3 related to the following:\\n 1. Generating, promoting, or furthering\\\n \\ fraud or the creation or promotion of disinformation\\n 2. Generating, promoting,\\\n \\ or furthering defamatory content, including the creation of defamatory statements,\\\n \\ images, or other content\\n 3. Generating, promoting, or further distributing\\\n \\ spam\\n 4. Impersonating another individual without consent, authorization,\\\n \\ or legal right\\n 5. Representing that the use of Meta Llama 3 or outputs are\\\n \\ human-generated\\n 6. Generating or facilitating false online engagement, including\\\n \\ fake reviews and other means of fake online engagement\\n4. Fail to appropriately\\\n \\ disclose to end users any known dangers of your AI system\\nPlease report any violation\\\n \\ of this Policy, software \u201cbug,\u201d or other problems that could lead to a violation\\\n \\ of this Policy through one of the following means:\\n * Reporting issues with\\\n \\ the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)\\n\\\n \\ * Reporting risky content generated by the model:\\n developers.facebook.com/llama_output_feedback\\n\\\n \\ * Reporting bugs and security concerns: facebook.com/whitehat/info\\n * Reporting\\\n \\ violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: LlamaUseReport@meta.com\""\nextra_gated_fields:\n First Name: text\n Last Name: text\n Date of birth: date_picker\n Country: country\n Affiliation: text\n geo: ip_location\n ? 
By clicking Submit below I accept the terms of the license and acknowledge that\n the information I provide will be collected stored processed and shared in accordance\n with the Meta Privacy Policy\n : checkbox\nextra_gated_description: The information you provide will be collected, stored, processed\n and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).\nextra_gated_button_content: Submit\nwidget:\n- example_title: Hello\n messages:\n - role: user\n content: Hey my name is Julien! How are you?\n- example_title: Winter holidays\n messages:\n - role: system\n content: You are a helpful and honest assistant. Please, respond concisely and\n truthfully.\n - role: user\n content: Can you recommend a good destination for Winter holidays?\n- example_title: Programming assistant\n messages:\n - role: system\n content: You are a helpful and honest code and programming assistant. Please,\n respond concisely and truthfully.\n - role: user\n content: Write a function that computes the nth fibonacci number.\ninference:\n parameters:\n max_new_tokens: 300\n stop:\n - <|end_of_text|>\n - <|eot_id|>"", ""transformersInfo"": null, ""_id"": ""6621526e505072f98f7edc01"", ""modelId"": ""QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"", ""usedStorage"": 360703248416}",0,,0,,0,,0,,0,"chiranthkg06/sample, facebook/CyberSecEval, huggingface/InferenceSupport/discussions/new?title=QuantFactory/Meta-Llama-3-8B-Instruct-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQuantFactory%2FMeta-Llama-3-8B-Instruct-GGUF%5D(%2FQuantFactory%2FMeta-Llama-3-8B-Instruct-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, nakcnx/paotung-llama3",4
Mistral-NeMo-Minitron-8B-Base_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
NexusRaven-V2-13B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Nexusflow/NexusRaven-V2-13B,"---
3
+ license: other
4
+ base_model: codellama/CodeLlama-13b-Instruct-hf
5
+ model-index:
6
+ - name: NexusRaven-13B
7
+ results: []
8
+ tags:
9
+ - function calling
10
+ ---
11
+ # NexusRaven-13B: Surpassing GPT-4 for Zero-shot Function Calling
12
+ <p align=""center"">
13
+ <a href=""https://huggingface.co/Nexusflow"" target=""_blank"">Nexusflow HF</a> - <a href=""https://discord.gg/HDSVmNAs3y"" target=""_blank"">Nexusflow Discord</a> - <a href=""http://nexusflow.ai/blogs/ravenv2"" target=""_blank"">NexusRaven-V2 blog post</a> - <a href=""https://colab.research.google.com/drive/19JYixRPPlanmW5q49WYi_tU8rhHeCEKW?usp=sharing"" target=""_blank"">Prompting Notebook CoLab</a> - <a href=""https://huggingface.co/spaces/Nexusflow/Nexus_Function_Calling_Leaderboard"" target=""_blank"">Leaderboard</a> - <a href=""https://huggingface.co/spaces/Nexusflow/NexusRaven-V2-Demo"" target=""_blank"">Read-World Demo</a> - <a href=""https://github.com/nexusflowai/NexusRaven-V2"" target=""_blank"">NexusRaven-V2-13B Github</a>
14
+ </p>
15
+
16
+ <p align=""center"" width=""100%"">
17
+ <a><img src=""NexusRaven.png"" alt=""NexusRaven"" style=""width: 40%; min-width: 300px; display: block; margin: auto;""></a>
18
+ </p>
19
+
20
+ ## Introducing NexusRaven-V2-13B
21
+ NexusRaven is an open-source and commercially viable function calling LLM that surpasses the state-of-the-art in function calling capabilities.
22
+
23
+ 💪 **Versatile Function Calling Capability**: NexusRaven-V2 is capable of generating single function calls, nested calls, and parallel calls in many challenging cases.
24
+
25
+ 🤓 **Fully Explainable**: NexusRaven-V2 is capable of generating very detailed explanations for the function calls it generates. This behavior can be turned off, to save tokens during inference.
26
+
27
+ 📊 **Performance Highlights**: NexusRaven-V2 surpasses GPT-4 by 7% in function calling success rates in human-generated use cases involving nested and composite functions.
28
+
29
+ 🔧 **Generalization to the Unseen**: NexusRaven-V2 has never been trained on the functions used in evaluation.
30
+
31
+ 🔥 **Commercially Permissive**: The training of NexusRaven-V2 does not involve any data generated by proprietary LLMs such as GPT-4. You have full control of the model when deployed in commercial applications.
32
+
33
+ Please checkout the following links!
34
+ - [Prompting Notebook CoLab](https://colab.research.google.com/drive/19JYixRPPlanmW5q49WYi_tU8rhHeCEKW?usp=sharing)
35
+ - [Evaluation Leaderboard](https://huggingface.co/spaces/Nexusflow/Nexus_Function_Calling_Leaderboard)
36
+ - [NexusRaven-V2 Real-World Demo](https://huggingface.co/spaces/Nexusflow/NexusRaven-V2-Demo)
37
+
38
+
39
+ ## NexusRaven-V2 model usage
40
+
41
+ NexusRaven-V2 accepts a list of python functions.
42
+
43
+ These python functions can do anything (including sending GET/POST requests to external APIs!).
44
+
45
+ The two requirements include the python function signature and the appropriate docstring to generate the function call.
46
+
47
+ NexusRaven-V2 also does best on functions with arguments, so please always only provide functions that require arguments to raven.
48
+
49
+ ### NexusRaven-V2's Capabilities
50
+
51
+ NexusRaven-V2 is capable of generating deeply nested function calls, parallel function calls, and simple single calls. It can also justify the function calls it generated. If you would like to generate the call only, please set a stop criteria of \""\<bot\_end\>\"". Otherwise, please allow NexusRaven-V2 to run until its stop token (i.e. ""\<\/s\>"").
52
+
53
+ ### Quick Start Prompting Guide
54
+
55
+ Please refer to our notebook, [How-To-Prompt.ipynb](https://colab.research.google.com/drive/19JYixRPPlanmW5q49WYi_tU8rhHeCEKW?usp=sharing), for more advanced tutorials on using NexusRaven-V2!
56
+
57
+ 1. When giving docstrings to Raven, please provide well-indented, detailed, and well-written docstrings as this can help accuracy.
58
+ 2. Raven does better when all functions provided to it has arguments, either required or optional, (i.e. ```func(dummy_arg)``` is preferred over ```func()```) as this can help accuracy.
59
+ 3. We strongly recommend to set sampling to False when prompting NexusRaven-V2.
60
+ 4. We strongly recommend a very low temperature (~0.001).
61
+ 5. We strongly recommend following the prompting style below.
62
+
63
+ When handling irrelevant user queries, users have noticed that specifying a ""no-op"" function with arguments work best. For example, something like this might work:
64
+ ```python
65
+ def no_relevant_function(user_query : str):
66
+ """"""
67
+ Call this when no other provided function can be called to answer the user query.
68
+
69
+ Args:
70
+ user_query: The user_query that cannot be answered by any other function calls.
71
+ """"""
72
+ ```
73
+
74
+ Please ensure to provide an argument to this function, as Raven works best on functions with arguments.
75
+
76
+ For parallel calls, due to the model being targeted for industry use, you can ""enable"" parallel calls by adding this into the prompt:
77
+ ```python
78
+ ""Setting: Allowed to issue multiple calls with semicolon\n""
79
+ ```
80
+ This can be added above the User Query to ""allow"" the model to use parallel calls, otherwise, the model will focus on nested and single calls primarily.
81
+
82
+ ### Quickstart
83
+ You can run the model on a GPU using the following code.
84
+ ```python
85
+ # Please `pip install transformers accelerate`
86
+ from transformers import pipeline
87
+
88
+
89
+ pipeline = pipeline(
90
+ ""text-generation"",
91
+ model=""Nexusflow/NexusRaven-V2-13B"",
92
+ torch_dtype=""auto"",
93
+ device_map=""auto"",
94
+ )
95
+
96
+ prompt_template = \
97
+ '''
98
+ Function:
99
+ def get_weather_data(coordinates):
100
+ """"""
101
+ Fetches weather data from the Open-Meteo API for the given latitude and longitude.
102
+
103
+ Args:
104
+ coordinates (tuple): The latitude of the location.
105
+
106
+ Returns:
107
+ float: The current temperature in the coordinates you've asked for
108
+ """"""
109
+
110
+ Function:
111
+ def get_coordinates_from_city(city_name):
112
+ """"""
113
+ Fetches the latitude and longitude of a given city name using the Maps.co Geocoding API.
114
+
115
+ Args:
116
+ city_name (str): The name of the city.
117
+
118
+ Returns:
119
+ tuple: The latitude and longitude of the city.
120
+ """"""
121
+
122
+ User Query: {query}<human_end>
123
+
124
+ '''
125
+
126
+ prompt = prompt_template.format(query=""What's the weather like in Seattle right now?"")
127
+
128
+ result = pipeline(prompt, max_new_tokens=2048, return_full_text=False, do_sample=False, temperature=0.001)[0][""generated_text""]
129
+ print (result)
130
+ ```
131
+
132
+ This should generate the following:
133
+ ```
134
+ Call: get_weather_data(coordinates=get_coordinates_from_city(city_name='Seattle'))<bot_end>
135
+ Thought: The function call `get_weather_data(coordinates=get_coordinates_from_city(city_name='Seattle'))` answers the question ""What's the weather like in Seattle right now?"" by following these steps:
136
+
137
+ 1. `get_coordinates_from_city(city_name='Seattle')`: This function call fetches the latitude and longitude of the city ""Seattle"" using the Maps.co Geocoding API.
138
+ 2. `get_weather_data(coordinates=...)`: This function call fetches the current weather data for the coordinates returned by the previous function call.
139
+
140
+ Therefore, the function call `get_weather_data(coordinates=get_coordinates_from_city(city_name='Seattle'))` answers the question ""What's the weather like in Seattle right now?"" by first fetching the coordinates of the city ""Seattle"" and then fetching the current weather data for those coordinates.
141
+ ```
142
+
143
+ If you would like to prevent the generation of the explanation of the function call (for example, to save on inference tokens), please set a stopping criteria of \<bot_end\>.
144
+
145
+ Please follow this prompting template to maximize the performance of RavenV2.
146
+
147
+ ### Using with OpenAI FC Schematics
148
+
149
+ [If you currently have a workflow that is built around OpenAI's function calling and you want to try NexusRaven-V2, we have a package that helps you drop in NexusRaven-V2.](https://github.com/nexusflowai/nexusraven-pip)
150
+
151
+ ### Using With LangChain
152
+
153
+ We've also included a [small demo for using Raven with langchain](langdemo.py)!
154
+
155
+ ## Evaluation
156
+
157
+ <p align=""center"" width=""100%"">
158
+ <a><img src=""blog2-fc.png"" alt=""NexusRaven"" style=""width: 80%; min-width: 300px; display: block; margin: auto;""></a>
159
+ <a><img src=""radar-2.png"" alt=""NexusRaven"" style=""width: 80%; min-width: 300px; display: block; margin: auto;""></a>
160
+ </p>
161
+
162
+ For a deeper dive into the results, please see our [Github README](https://github.com/nexusflowai/NexusRaven).
163
+
164
+ # Limitations
165
+ 1. The model works best when it is connected with a retriever when there are a multitude of functions, as a large number of functions will saturate the context window of this model.
166
+ 2. The model can be prone to generate incorrect calls. Please ensure proper guardrails to capture errant behavior is in place.
167
+ 3. The explanations generated by NexusRaven-V2 might be incorrect. Please ensure proper guardrails are present to capture errant behavior.
168
+
169
+ ## License
170
+ This model was trained on commercially viable data and is licensed under the [Nexusflow community license](https://huggingface.co/Nexusflow/NexusRaven-V2-13B/blob/main/LICENSE.txt).
171
+
172
+
173
+ ## References
174
+ We thank the CodeLlama team for their amazing models!
175
+
176
+ ```
177
+ @misc{rozière2023code,
178
+ title={Code Llama: Open Foundation Models for Code},
179
+ author={Baptiste Rozière and Jonas Gehring and Fabian Gloeckle and Sten Sootla and Itai Gat and Xiaoqing Ellen Tan and Yossi Adi and Jingyu Liu and Tal Remez and Jérémy Rapin and Artyom Kozhevnikov and Ivan Evtimov and Joanna Bitton and Manish Bhatt and Cristian Canton Ferrer and Aaron Grattafiori and Wenhan Xiong and Alexandre Défossez and Jade Copet and Faisal Azhar and Hugo Touvron and Louis Martin and Nicolas Usunier and Thomas Scialom and Gabriel Synnaeve},
180
+ year={2023},
181
+ eprint={2308.12950},
182
+ archivePrefix={arXiv},
183
+ primaryClass={cs.CL}
184
+ }
185
+ ```
186
+
187
+
188
+ ## Citation
189
+ ```
190
+ @misc{nexusraven,
191
+ title={NexusRaven-V2: Surpassing GPT-4 for Zero-shot Function Calling},
192
+ author={Nexusflow.ai team},
193
+ year={2023},
194
+ url={https://nexusflow.ai/blogs/ravenv2}
195
+ }
196
+ ```
197
+
198
+ ## Contact
199
+ Please join our [Discord Channel](https://discord.gg/HDSVmNAs3y) to reach out for any issues and comments!","{""id"": ""Nexusflow/NexusRaven-V2-13B"", ""author"": ""Nexusflow"", ""sha"": ""cdab7132db4a4fd64513123374ea1451d85a7ace"", ""last_modified"": ""2024-05-29 17:03:06+00:00"", ""created_at"": ""2023-12-04 22:06:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3660, ""downloads_all_time"": null, ""likes"": 465, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""function calling"", ""arxiv:2308.12950"", ""base_model:codellama/CodeLlama-13b-Instruct-hf"", ""base_model:finetune:codellama/CodeLlama-13b-Instruct-hf"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: codellama/CodeLlama-13b-Instruct-hf\nlicense: other\ntags:\n- function calling\nmodel-index:\n- name: NexusRaven-13B\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""NexusRaven-13B"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": 
false, ""single_word"": false}, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NexusRaven.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='blog2-fc.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='langdemo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='radar-2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""sanjay920/rubra-v0.1-function-calling"", ""Tonic/NexusRaven2"", ""theshresthshukla/Look_at_NexusRaven_V2_13B"", ""PeepDaSlan9/Nexusflow-NexusRaven-V2-13B"", ""dev2learn/Nexusflow-NexusRaven-V2-13B"", 
""SethAI/Nexusflow-NexusRaven-V2-13B"", ""ai-on-web3/Nexusflow-NexusRaven-V2-13B"", ""coltgainey/Nexusflow-NexusRaven-V2-13B"", ""FrostSa/Nexusflow-NexusRaven-V2-13B"", ""Tonic1/NexusRaven""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-29 17:03:06+00:00"", ""cardData"": ""base_model: codellama/CodeLlama-13b-Instruct-hf\nlicense: other\ntags:\n- function calling\nmodel-index:\n- name: NexusRaven-13B\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""656e4d813b83d70c30e89c55"", ""modelId"": ""Nexusflow/NexusRaven-V2-13B"", ""usedStorage"": 52068067950}",0,"https://huggingface.co/PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed",3,,0,"https://huggingface.co/TheBloke/NexusRaven-V2-13B-GGUF, https://huggingface.co/TheBloke/NexusRaven-V2-13B-AWQ, https://huggingface.co/TheBloke/NexusRaven-V2-13B-GPTQ, https://huggingface.co/PrunaAI/Nexusflow-NexusRaven-V2-13B-bnb-4bit-smashed, https://huggingface.co/QuantFactory/NexusRaven-V2-13B-GGUF, https://huggingface.co/mradermacher/NexusRaven-V2-13B-GGUF, https://huggingface.co/mradermacher/NexusRaven-V2-13B-i1-GGUF, https://huggingface.co/tensorblock/NexusRaven-V2-13B-GGUF, https://huggingface.co/DevQuasar/Nexusflow.NexusRaven-V2-13B-GGUF",9,,0,"FrostSa/Nexusflow-NexusRaven-V2-13B, Nexusflow/NexusRaven-V2-Demo, Nexusflow/Nexus_Function_Calling_Leaderboard, PeepDaSlan9/Nexusflow-NexusRaven-V2-13B, SethAI/Nexusflow-NexusRaven-V2-13B, Tonic/NexusRaven2, Tonic1/NexusRaven, ai-on-web3/Nexusflow-NexusRaven-V2-13B, coltgainey/Nexusflow-NexusRaven-V2-13B, dev2learn/Nexusflow-NexusRaven-V2-13B, 
huggingface/InferenceSupport/discussions/new?title=Nexusflow/NexusRaven-V2-13B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNexusflow%2FNexusRaven-V2-13B%5D(%2FNexusflow%2FNexusRaven-V2-13B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sanjay920/rubra-v0.1-function-calling, theshresthshukla/Look_at_NexusRaven_V2_13B",13
200
+ PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed,"---
201
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
202
+ base_model: Nexusflow/NexusRaven-V2-13B
203
+ metrics:
204
+ - memory_disk
205
+ - memory_inference
206
+ - inference_latency
207
+ - inference_throughput
208
+ - inference_CO2_emissions
209
+ - inference_energy_consumption
210
+ tags:
211
+ - pruna-ai
212
+ ---
213
+ <!-- header start -->
214
+ <!-- 200823 -->
215
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
216
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
217
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
218
+ </a>
219
+ </div>
220
+ <!-- header end -->
221
+
222
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
223
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
224
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
225
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
226
+
227
+ # Simply make AI models cheaper, smaller, faster, and greener!
228
+
229
+ - Give a thumbs up if you like this model!
230
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
231
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
232
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
233
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
234
+
235
+ ## Results
236
+
237
+ ![image info](./plots.png)
238
+
239
+ **Frequently Asked Questions**
240
+ - ***How does the compression work?*** The model is compressed with hqq.
241
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
242
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
243
+ - ***What is the model format?*** We use safetensors.
244
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
245
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
246
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
247
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
248
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
249
+
250
+ ## Setup
251
+
252
+ You can run the smashed model with these steps:
253
+
254
+ 0. Check requirements from the original repo Nexusflow/NexusRaven-V2-13B installed. In particular, check python, cuda, and transformers versions.
255
+ 1. Make sure that you have installed quantization related packages.
256
+ ```bash
257
+ pip install hqq
258
+ ```
259
+ 2. Load & run the model.
260
+ ```python
261
+ from transformers import AutoModelForCausalLM, AutoTokenizer
262
+ from hqq.engine.hf import HQQModelForCausalLM
263
+ from hqq.models.hf.base import AutoHQQHFModel
264
+
265
+ try:
266
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed"", device_map='auto')
267
+ except:
268
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed"")
269
+ tokenizer = AutoTokenizer.from_pretrained(""Nexusflow/NexusRaven-V2-13B"")
270
+
271
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
272
+
273
+ outputs = model.generate(input_ids, max_new_tokens=216)
274
+ tokenizer.decode(outputs[0])
275
+ ```
276
+
277
+ ## Configurations
278
+
279
+ The configuration info are in `smash_config.json`.
280
+
281
+ ## Credits & License
282
+
283
+ The license of the smashed model follows the license of the original model. Please check the license of the original model Nexusflow/NexusRaven-V2-13B before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
284
+
285
+ ## Want to compress other models?
286
+
287
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
288
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""422433c53a3651b72a3f1f8c09463baa1fc89a7d"", ""last_modified"": ""2024-08-02 16:17:00+00:00"", ""created_at"": ""2024-06-24 11:05:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:Nexusflow/NexusRaven-V2-13B"", ""base_model:finetune:Nexusflow/NexusRaven-V2-13B"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:17:00+00:00"", ""cardData"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""667952e444eb7b9ee46834d5"", ""modelId"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed"", ""usedStorage"": 
2540996618}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed%5D(%2FPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
289
+ PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed,"---
290
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
291
+ base_model: Nexusflow/NexusRaven-V2-13B
292
+ metrics:
293
+ - memory_disk
294
+ - memory_inference
295
+ - inference_latency
296
+ - inference_throughput
297
+ - inference_CO2_emissions
298
+ - inference_energy_consumption
299
+ tags:
300
+ - pruna-ai
301
+ ---
302
+ <!-- header start -->
303
+ <!-- 200823 -->
304
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
305
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
306
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
307
+ </a>
308
+ </div>
309
+ <!-- header end -->
310
+
311
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
312
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
313
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
314
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
315
+
316
+ # Simply make AI models cheaper, smaller, faster, and greener!
317
+
318
+ - Give a thumbs up if you like this model!
319
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
320
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
321
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
322
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
323
+
324
+ ## Results
325
+
326
+ ![image info](./plots.png)
327
+
328
+ **Frequently Asked Questions**
329
+ - ***How does the compression work?*** The model is compressed with hqq.
330
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
331
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
332
+ - ***What is the model format?*** We use safetensors.
333
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
334
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
335
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
336
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
337
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
338
+
339
+ ## Setup
340
+
341
+ You can run the smashed model with these steps:
342
+
343
+ 0. Check requirements from the original repo Nexusflow/NexusRaven-V2-13B installed. In particular, check python, cuda, and transformers versions.
344
+ 1. Make sure that you have installed quantization related packages.
345
+ ```bash
346
+ pip install hqq
347
+ ```
348
+ 2. Load & run the model.
349
+ ```python
350
+ from transformers import AutoModelForCausalLM, AutoTokenizer
351
+ from hqq.engine.hf import HQQModelForCausalLM
352
+ from hqq.models.hf.base import AutoHQQHFModel
353
+
354
+ try:
355
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed"", device_map='auto')
356
+ except:
357
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed"")
358
+ tokenizer = AutoTokenizer.from_pretrained(""Nexusflow/NexusRaven-V2-13B"")
359
+
360
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
361
+
362
+ outputs = model.generate(input_ids, max_new_tokens=216)
363
+ tokenizer.decode(outputs[0])
364
+ ```
365
+
366
+ ## Configurations
367
+
368
+ The configuration info are in `smash_config.json`.
369
+
370
+ ## Credits & License
371
+
372
+ The license of the smashed model follows the license of the original model. Please check the license of the original model Nexusflow/NexusRaven-V2-13B before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
373
+
374
+ ## Want to compress other models?
375
+
376
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
377
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""af32e151087731d9c9b9a694db0b034dba05c8fa"", ""last_modified"": ""2024-08-02 16:17:05+00:00"", ""created_at"": ""2024-06-24 11:07:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:Nexusflow/NexusRaven-V2-13B"", ""base_model:finetune:Nexusflow/NexusRaven-V2-13B"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:17:05+00:00"", ""cardData"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""667953616bbc59f3d04d2190"", ""modelId"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed"", ""usedStorage"": 4126467760}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed%5D(%2FPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
378
+ PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed,"---
379
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
380
+ base_model: Nexusflow/NexusRaven-V2-13B
381
+ metrics:
382
+ - memory_disk
383
+ - memory_inference
384
+ - inference_latency
385
+ - inference_throughput
386
+ - inference_CO2_emissions
387
+ - inference_energy_consumption
388
+ tags:
389
+ - pruna-ai
390
+ ---
391
+ <!-- header start -->
392
+ <!-- 200823 -->
393
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
394
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
395
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
396
+ </a>
397
+ </div>
398
+ <!-- header end -->
399
+
400
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
401
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
402
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
403
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
404
+
405
+ # Simply make AI models cheaper, smaller, faster, and greener!
406
+
407
+ - Give a thumbs up if you like this model!
408
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
409
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
410
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
411
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
412
+
413
+ ## Results
414
+
415
+ ![image info](./plots.png)
416
+
417
+ **Frequently Asked Questions**
418
+ - ***How does the compression work?*** The model is compressed with hqq.
419
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
420
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
421
+ - ***What is the model format?*** We use safetensors.
422
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
423
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
424
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
425
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
426
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
427
+
428
+ ## Setup
429
+
430
+ You can run the smashed model with these steps:
431
+
432
+ 0. Check requirements from the original repo Nexusflow/NexusRaven-V2-13B installed. In particular, check python, cuda, and transformers versions.
433
+ 1. Make sure that you have installed quantization related packages.
434
+ ```bash
435
+ pip install hqq
436
+ ```
437
+ 2. Load & run the model.
438
+ ```python
439
+ from transformers import AutoModelForCausalLM, AutoTokenizer
440
+ from hqq.engine.hf import HQQModelForCausalLM
441
+ from hqq.models.hf.base import AutoHQQHFModel
442
+
443
+ try:
444
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed"", device_map='auto')
445
+ except:
446
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed"")
447
+ tokenizer = AutoTokenizer.from_pretrained(""Nexusflow/NexusRaven-V2-13B"")
448
+
449
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
450
+
451
+ outputs = model.generate(input_ids, max_new_tokens=216)
452
+ tokenizer.decode(outputs[0])
453
+ ```
454
+
455
+ ## Configurations
456
+
457
+ The configuration info are in `smash_config.json`.
458
+
459
+ ## Credits & License
460
+
461
+ The license of the smashed model follows the license of the original model. Please check the license of the original model Nexusflow/NexusRaven-V2-13B before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
462
+
463
+ ## Want to compress other models?
464
+
465
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
466
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""de7de1715f5caa99f0cb6aa8979ccca0635c6bed"", ""last_modified"": ""2024-08-02 16:17:06+00:00"", ""created_at"": ""2024-06-24 11:07:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:Nexusflow/NexusRaven-V2-13B"", ""base_model:finetune:Nexusflow/NexusRaven-V2-13B"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:17:06+00:00"", ""cardData"": ""base_model: Nexusflow/NexusRaven-V2-13B\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6679536872d0536e13c74e53"", ""modelId"": ""PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed"", ""usedStorage"": 7298420904}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/Nexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed%5D(%2FPrunaAI%2FNexusflow-NexusRaven-V2-13B-HQQ-4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
OmniParser_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,953 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ microsoft/OmniParser,"---
3
+ library_name: transformers
4
+ license: mit
5
+ pipeline_tag: image-text-to-text
6
+ ---
7
+ 📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/)] [[Demo](https://huggingface.co/spaces/microsoft/OmniParser/)]
8
+
9
+ # Model Summary
10
+ OmniParser is a general screen parsing tool, which interprets/converts UI screenshot to structured format, to improve existing LLM based UI agent.
11
+ Training Datasets include: 1) an interactable icon detection dataset, which was curated from popular web pages and automatically annotated to highlight clickable and actionable regions, and 2) an icon description dataset, designed to associate each UI element with its corresponding function.
12
+
13
+ This model hub includes a finetuned version of YOLOv8 and a finetuned BLIP-2 model on the above dataset respectively. For more details of the models used and finetuning, please refer to the [paper](https://arxiv.org/abs/2408.00203).
14
+
15
+ # Responsible AI Considerations
16
+ ## Intended Use
17
+ - OmniParser is designed to be able to convert unstructured screenshot image into structured list of elements including interactable regions location and captions of icons on its potential functionality.
18
+ - OmniParser is intended to be used in settings where users are already trained on responsible analytic approaches and critical reasoning is expected. OmniParser is capable of providing extracted information from the screenshot, however human judgement is needed for the output of OmniParser.
19
+ - OmniParser is intended to be used on various screenshots, which includes both PC and Phone, and also on various applications.
20
+ ## limitations
21
+ - OmniParser is designed to faithfully convert screenshot image into structured elements of interactable regions and semantics of the screen, while it does not detect harmful content in its input (like users have freedom to decide the input of any LLMs), users are expected to provide input to the OmniParser that is not harmful.
22
+ - While OmniParser only converts screenshot image into texts, it can be used to construct an GUI agent based on LLMs that is actionable. When developing and operating the agent using OmniParser, the developers need to be responsible and follow common safety standard.
23
+ - For OmniPaser-BLIP2, it may incorrectly infer the gender or other sensitive attribute (e.g., race, religion etc.) of individuals in icon images. Inference of sensitive attributes may rely upon stereotypes and generalizations rather than information about specific individuals and are more likely to be incorrect for marginalized people. Incorrect inferences may result in significant physical or psychological injury or restrict, infringe upon or undermine the ability to realize an individual’s human rights. We do not recommend use of OmniParser in any workplace-like use case scenario.
24
+
25
+ # License
26
+ Please note that icon_detect model is under AGPL license, and icon_caption_blip2 & icon_caption_florence is under MIT license. Please refer to the LICENSE file in the folder of each model.
27
+
28
+
29
+ ","{""id"": ""microsoft/OmniParser"", ""author"": ""microsoft"", ""sha"": ""9a02350f935534e9c9c29d0e6f28e4d1aff36a5c"", ""last_modified"": ""2024-12-02 17:47:28+00:00"", ""created_at"": ""2024-10-07 23:32:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 871, ""downloads_all_time"": null, ""likes"": 1661, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip-2"", ""visual-question-answering"", ""image-text-to-text"", ""arxiv:2408.00203"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Blip2ForConditionalGeneration""], ""model_type"": ""blip-2""}, ""transformers_info"": {""auto_model"": ""AutoModelForVisualQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""visual-question-answering"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_blip2/LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_blip2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_blip2/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_blip2/pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_blip2/pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='icon_caption_blip2/pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_florence/LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_florence/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_florence/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_caption_florence/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect/LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect/model.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect_v1_5/LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect_v1_5/model.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect_v1_5/model_v1_5.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='icon_detect_v1_5/train_args.yaml', size=None, blob_id=None, lfs=None)""], ""spaces"": [""microsoft/OmniParser"", ""microsoft/Magma-UI"", ""Shak33l-UiRev/Ui-Rev-Doc-Model"", ""drlon/magma-ui-agent"", ""jadechoghari/OmniParser-demo"", ""gabrielvaz/microsoft-OmniParser"", ""qween-beth/microsoft-OmniParser"", ""nisten/OmniParser"", ""boompack/OmniParser"", ""Nymbo/OmniParser"", ""charbel-malo/UXSpaghetti"", ""mathi28/automated_dynamic_web_crawling"", ""mytoolzone1/OmniParser"", ""Sanket17/trying"", ""Sanket17/newspaces"", ""BronioInt/Lake-1-Pro"", ""Sanket17/OmniPar"", ""Sanket17/NewParser"", ""ahsansaeed/OmniParser""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 17:47:28+00:00"", ""cardData"": ""library_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": 
""AutoModelForVisualQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""visual-question-answering"", ""processor"": ""AutoProcessor""}, ""_id"": ""67046f77aa68703cb1d7fed9"", ""modelId"": ""microsoft/OmniParser"", ""usedStorage"": 16164887225}",0,"https://huggingface.co/Canstralian/RedTeamAI, https://huggingface.co/Wilsonzarateez/bigtime, https://huggingface.co/Sumic515/testaaa, https://huggingface.co/User4444456/KhaterineHammond444456, https://huggingface.co/betzabetmilagrosul/roberta-base, https://huggingface.co/Ravi1317/Honeybee, https://huggingface.co/MAKERBOT/BOX_AI_GT3, https://huggingface.co/imjustajellyfish/skibidi, https://huggingface.co/khushpreet93/khushi, https://huggingface.co/kiraleyn/kisa_model, https://huggingface.co/musicissospecial/Evangelista, https://huggingface.co/Jimdonn/Marci, https://huggingface.co/pixghotbi/news, https://huggingface.co/tayir55922/Gem, https://huggingface.co/Spookiedongo/Flame, https://huggingface.co/Rezwan9463/Janani_AI, https://huggingface.co/rororo2002/TuluPoc, https://huggingface.co/mrrush404/TestSubject, https://huggingface.co/Auremaya/test2, https://huggingface.co/ChloeAndRose/Chloe, https://huggingface.co/Bam34568/Hg, https://huggingface.co/Noufy/sms_url_phishing, https://huggingface.co/Test11223344556677889900/Test-AI",23,"https://huggingface.co/mmedhat/new_data_analysis_test, https://huggingface.co/Too8kio/ai, https://huggingface.co/Thor8391/Sienna, https://huggingface.co/Kuba200110/PanteraAI, https://huggingface.co/bondjames23466/just-imagine-with-your-text, https://huggingface.co/WesleyGabri54/Andrio, https://huggingface.co/Charleschess/medicina__, https://huggingface.co/Bogi02/Hangok",8,,0,"https://huggingface.co/bkbj/Atest, https://huggingface.co/RexDaTrustee/Read-text.Ai, https://huggingface.co/aicoder69/aicoder69",3,"Nymbo/OmniParser, Shak33l-UiRev/Ui-Rev-Doc-Model, boompack/OmniParser, charbel-malo/UXSpaghetti, drlon/magma-ui-agent, gabrielvaz/microsoft-OmniParser, 
huggingface/InferenceSupport/discussions/new?title=microsoft/OmniParser&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmicrosoft%2FOmniParser%5D(%2Fmicrosoft%2FOmniParser)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jadechoghari/OmniParser-demo, mathi28/automated_dynamic_web_crawling, microsoft/Magma-UI, microsoft/OmniParser, microsoft/OmniParser/, nisten/OmniParser, qween-beth/microsoft-OmniParser",14
30
+ Canstralian/RedTeamAI,"---
31
+ metadata:
32
+ name: Canstralian
33
+ tags:
34
+ - cybersecurity
35
+ - penetration-testing
36
+ - red-team
37
+ - ai
38
+ - offensive-security
39
+ - threat-detection
40
+ - code-generation
41
+ license: MIT
42
+ model_index:
43
+ model_name: RedTeamAI
44
+ model_description: >
45
+ AI-powered model designed for penetration testing and security automation,
46
+ focused on detecting and analyzing known cybersecurity exploits.
47
+ model_type: text-classification
48
+ language: English
49
+ framework: PyTorch
50
+ pipeline_tag: text-classification
51
+ sdk: transformers
52
+ results:
53
+ task: text-classification
54
+ dataset: PenTest-2024 (custom)
55
+ metrics:
56
+ accuracy: 92.5
57
+ precision: 89.3
58
+ recall: 91.8
59
+ f1_score: 90.5
60
+ source: Internal Benchmark
61
+ license: mit
62
+ language:
63
+ - en
64
+ tags:
65
+ - ai
66
+ - offensive-security
67
+ - code-generation
68
+ datasets:
69
+ - 0dAI/PentestingCommandLogic
70
+ - royweiss1/GPT_Keylogger_Dataset
71
+ - allenai/real-toxicity-prompts
72
+ - HuggingFaceH4/instruction-dataset
73
+ - togethercomputer/RedPajama-Data-1T
74
+ - Canstralian/CyberExploitDB
75
+ metrics:
76
+ - code_eval
77
+ base_model:
78
+ - replit/replit-code-v1_5-3b
79
+ - microsoft/OmniParser
80
+ library_name: transformers
81
+ ---
82
+
83
+ # Model Card for Model ID
84
+
85
+ <!-- Provide a quick summary of what the model is/does. -->
86
+
87
+ This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
88
+
89
+ ## Model Details
90
+
91
+ ### Model Description
92
+
93
+ <!-- Provide a longer summary of what this model is. -->
94
+
95
+
96
+
97
+ - **Developed by:** [More Information Needed]
98
+ - **Funded by [optional]:** [More Information Needed]
99
+ - **Shared by [optional]:** [More Information Needed]
100
+ - **Model type:** [More Information Needed]
101
+ - **Language(s) (NLP):** [More Information Needed]
102
+ - **License:** [More Information Needed]
103
+ - **Finetuned from model [optional]:** [More Information Needed]
104
+
105
+ ### Model Sources [optional]
106
+
107
+ <!-- Provide the basic links for the model. -->
108
+
109
+ - **Repository:** [More Information Needed]
110
+ - **Paper [optional]:** [More Information Needed]
111
+ - **Demo [optional]:** [More Information Needed]
112
+
113
+ ## Uses
114
+
115
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
116
+
117
+ ### Direct Use
118
+
119
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
120
+
121
+ [More Information Needed]
122
+
123
+ ### Downstream Use [optional]
124
+
125
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
126
+
127
+ [More Information Needed]
128
+
129
+ ### Out-of-Scope Use
130
+
131
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
132
+
133
+ [More Information Needed]
134
+
135
+ ## Bias, Risks, and Limitations
136
+
137
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
138
+
139
+ [More Information Needed]
140
+
141
+ ### Recommendations
142
+
143
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
144
+
145
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
146
+
147
+ ## How to Get Started with the Model
148
+
149
+ Use the code below to get started with the model.
150
+
151
+ [More Information Needed]
152
+
153
+ ## Training Details
154
+
155
+ ### Training Data
156
+
157
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
158
+
159
+ [More Information Needed]
160
+
161
+ ### Training Procedure
162
+
163
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
164
+
165
+ #### Preprocessing [optional]
166
+
167
+ [More Information Needed]
168
+
169
+
170
+ #### Training Hyperparameters
171
+
172
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
173
+
174
+ #### Speeds, Sizes, Times [optional]
175
+
176
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
177
+
178
+ [More Information Needed]
179
+
180
+ ## Evaluation
181
+
182
+ <!-- This section describes the evaluation protocols and provides the results. -->
183
+
184
+ ### Testing Data, Factors & Metrics
185
+
186
+ #### Testing Data
187
+
188
+ <!-- This should link to a Dataset Card if possible. -->
189
+
190
+ [More Information Needed]
191
+
192
+ #### Factors
193
+
194
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
195
+
196
+ [More Information Needed]
197
+
198
+ #### Metrics
199
+
200
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
201
+
202
+ [More Information Needed]
203
+
204
+ ### Results
205
+
206
+ [More Information Needed]
207
+
208
+ #### Summary
209
+
210
+
211
+
212
+ ## Model Examination [optional]
213
+
214
+ <!-- Relevant interpretability work for the model goes here -->
215
+
216
+ [More Information Needed]
217
+
218
+ ## Environmental Impact
219
+
220
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
221
+
222
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
223
+
224
+ - **Hardware Type:** [More Information Needed]
225
+ - **Hours used:** [More Information Needed]
226
+ - **Cloud Provider:** [More Information Needed]
227
+ - **Compute Region:** [More Information Needed]
228
+ - **Carbon Emitted:** [More Information Needed]
229
+
230
+ ## Technical Specifications [optional]
231
+
232
+ ### Model Architecture and Objective
233
+
234
+ [More Information Needed]
235
+
236
+ ### Compute Infrastructure
237
+
238
+ [More Information Needed]
239
+
240
+ #### Hardware
241
+
242
+ [More Information Needed]
243
+
244
+ #### Software
245
+
246
+ [More Information Needed]
247
+
248
+ ## Citation [optional]
249
+
250
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
251
+
252
+ **BibTeX:**
253
+
254
+ [More Information Needed]
255
+
256
+ **APA:**
257
+
258
+ [More Information Needed]
259
+
260
+ ## Glossary [optional]
261
+
262
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
263
+
264
+ [More Information Needed]
265
+
266
+ ## More Information [optional]
267
+
268
+ [More Information Needed]
269
+
270
+ ## Model Card Authors [optional]
271
+
272
+ [More Information Needed]
273
+
274
+ ## Model Card Contact
275
+
276
+ [More Information Needed]","{""id"": ""Canstralian/RedTeamAI"", ""author"": ""Canstralian"", ""sha"": ""465259ab32ad82cb333c4cdf4b67e3820b74816e"", ""last_modified"": ""2024-11-24 09:55:01+00:00"", ""created_at"": ""2024-11-14 14:44:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""ai"", ""offensive-security"", ""code-generation"", ""en"", ""dataset:0dAI/PentestingCommandLogic"", ""dataset:royweiss1/GPT_Keylogger_Dataset"", ""dataset:allenai/real-toxicity-prompts"", ""dataset:HuggingFaceH4/instruction-dataset"", ""dataset:togethercomputer/RedPajama-Data-1T"", ""dataset:Canstralian/CyberExploitDB"", ""arxiv:1910.09700"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""doi:10.57967/hf/3614"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- replit/replit-code-v1_5-3b\n- microsoft/OmniParser\ndatasets:\n- 0dAI/PentestingCommandLogic\n- royweiss1/GPT_Keylogger_Dataset\n- allenai/real-toxicity-prompts\n- HuggingFaceH4/instruction-dataset\n- togethercomputer/RedPajama-Data-1T\n- Canstralian/CyberExploitDB\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- code_eval\ntags:\n- ai\n- offensive-security\n- code-generation\nmetadata:\n name: Canstralian\n tags:\n - cybersecurity\n - penetration-testing\n - red-team\n - ai\n - offensive-security\n - threat-detection\n - code-generation\n license: MIT\n model_index:\n model_name: RedTeamAI\n model_description: 'AI-powered model designed for penetration testing and security\n automation, focused on detecting and analyzing known cybersecurity exploits.\n\n '\n model_type: text-classification\n language: English\n framework: PyTorch\n 
pipeline_tag: text-classification\n sdk: transformers\n results:\n task: text-classification\n dataset: PenTest-2024 (custom)\n metrics:\n accuracy: 92.5\n precision: 89.3\n recall: 91.8\n f1_score: 90.5\n source: Internal Benchmark"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='app.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Canstralian/whackjacker"", ""Canstralian/TeamRed""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-24 09:55:01+00:00"", ""cardData"": ""base_model:\n- replit/replit-code-v1_5-3b\n- microsoft/OmniParser\ndatasets:\n- 0dAI/PentestingCommandLogic\n- royweiss1/GPT_Keylogger_Dataset\n- allenai/real-toxicity-prompts\n- HuggingFaceH4/instruction-dataset\n- togethercomputer/RedPajama-Data-1T\n- Canstralian/CyberExploitDB\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- code_eval\ntags:\n- ai\n- offensive-security\n- code-generation\nmetadata:\n name: Canstralian\n tags:\n - cybersecurity\n - penetration-testing\n - red-team\n - ai\n - offensive-security\n - threat-detection\n - code-generation\n license: MIT\n model_index:\n model_name: RedTeamAI\n model_description: 'AI-powered model designed for penetration testing and security\n automation, focused on detecting and analyzing known cybersecurity exploits.\n\n '\n model_type: text-classification\n language: English\n framework: PyTorch\n pipeline_tag: text-classification\n sdk: transformers\n results:\n task: text-classification\n dataset: PenTest-2024 (custom)\n metrics:\n accuracy: 92.5\n 
precision: 89.3\n recall: 91.8\n f1_score: 90.5\n source: Internal Benchmark"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67360cca7ac3f35e4476502d"", ""modelId"": ""Canstralian/RedTeamAI"", ""usedStorage"": 0}",1,,0,,0,,0,,0,"Canstralian/TeamRed, Canstralian/whackjacker, huggingface/InferenceSupport/discussions/new?title=Canstralian/RedTeamAI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCanstralian%2FRedTeamAI%5D(%2FCanstralian%2FRedTeamAI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",3
277
+ Wilsonzarateez/bigtime,"---
278
+ license: apache-2.0
279
+ datasets:
280
+ - fka/awesome-chatgpt-prompts
281
+ language:
282
+ - en
283
+ metrics:
284
+ - bleurt
285
+ base_model:
286
+ - microsoft/OmniParser
287
+ new_version: microsoft/OmniParser
288
+ pipeline_tag: text-to-video
289
+ library_name: diffusers
290
+ ---","{""id"": ""Wilsonzarateez/bigtime"", ""author"": ""Wilsonzarateez"", ""sha"": ""293678e0dde8666ccda54530eebfc189fc1b865f"", ""last_modified"": ""2024-11-08 21:38:28+00:00"", ""created_at"": ""2024-11-08 21:34:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-video"", ""en"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- bleurt\npipeline_tag: text-to-video\nnew_version: microsoft/OmniParser"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-08 21:38:28+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- bleurt\npipeline_tag: text-to-video\nnew_version: microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""672e83d6605ede59885a1bc9"", ""modelId"": ""Wilsonzarateez/bigtime"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Wilsonzarateez/bigtime&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BWilsonzarateez%2Fbigtime%5D(%2FWilsonzarateez%2Fbigtime)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
291
+ Sumic515/testaaa,"---
292
+ license: creativeml-openrail-m
293
+ library_name: transformers
294
+ datasets:
295
+ - fka/awesome-chatgpt-prompts
296
+ - Spawning/PD12M
297
+ pipeline_tag: text-generation
298
+ widget:
299
+ - text: >-
300
+ Is this review positive or negative? Review: Best cast iron skillet you will
301
+ ever buy.
302
+ example_title: Sentiment analysis
303
+ - text: >-
304
+ Barack Obama nominated Hilary Clinton as his secretary of state on Monday.
305
+ He chose her because she had ...
306
+ example_title: Coreference resolution
307
+ - text: >-
308
+ On a shelf, there are five books: a gray book, a red book, a purple book, a
309
+ blue book, and a black book ...
310
+ example_title: Logic puzzles
311
+ - text: >-
312
+ The two men running to become New York City's next mayor will face off in
313
+ their first debate Wednesday night ...
314
+ example_title: Reading comprehension
315
+ base_model:
316
+ - microsoft/OmniParser
317
+ - stabilityai/stable-diffusion-3.5-large
318
+ ---","{""id"": ""Sumic515/testaaa"", ""author"": ""Sumic515"", ""sha"": ""706ad9baf7d23c61184b02caf059ec2e92c0e89f"", ""last_modified"": ""2024-11-11 04:46:20+00:00"", ""created_at"": ""2024-11-11 04:22:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""text-generation"", ""dataset:fka/awesome-chatgpt-prompts"", ""dataset:Spawning/PD12M"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:creativeml-openrail-m"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\n- stabilityai/stable-diffusion-3.5-large\ndatasets:\n- fka/awesome-chatgpt-prompts\n- Spawning/PD12M\nlibrary_name: transformers\nlicense: creativeml-openrail-m\npipeline_tag: text-generation\nwidget:\n- text: 'Is this review positive or negative? Review: Best cast iron skillet you will\n ever buy.'\n example_title: Sentiment analysis\n- text: Barack Obama nominated Hilary Clinton as his secretary of state on Monday.\n He chose her because she had ...\n example_title: Coreference resolution\n- text: 'On a shelf, there are five books: a gray book, a red book, a purple book,\n a blue book, and a black book ...'\n example_title: Logic puzzles\n- text: The two men running to become New York City's next mayor will face off in\n their first debate Wednesday night ...\n example_title: Reading comprehension"", ""widget_data"": [{""text"": ""Is this review positive or negative? Review: Best cast iron skillet you will ever buy."", ""example_title"": ""Sentiment analysis""}, {""text"": ""Barack Obama nominated Hilary Clinton as his secretary of state on Monday. 
He chose her because she had ..."", ""example_title"": ""Coreference resolution""}, {""text"": ""On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black book ..."", ""example_title"": ""Logic puzzles""}, {""text"": ""The two men running to become New York City's next mayor will face off in their first debate Wednesday night ..."", ""example_title"": ""Reading comprehension""}], ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-11 04:46:20+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\n- stabilityai/stable-diffusion-3.5-large\ndatasets:\n- fka/awesome-chatgpt-prompts\n- Spawning/PD12M\nlibrary_name: transformers\nlicense: creativeml-openrail-m\npipeline_tag: text-generation\nwidget:\n- text: 'Is this review positive or negative? 
Review: Best cast iron skillet you will\n ever buy.'\n example_title: Sentiment analysis\n- text: Barack Obama nominated Hilary Clinton as his secretary of state on Monday.\n He chose her because she had ...\n example_title: Coreference resolution\n- text: 'On a shelf, there are five books: a gray book, a red book, a purple book,\n a blue book, and a black book ...'\n example_title: Logic puzzles\n- text: The two men running to become New York City's next mayor will face off in\n their first debate Wednesday night ...\n example_title: Reading comprehension"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6731868ebea29cae03d24ab6"", ""modelId"": ""Sumic515/testaaa"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Sumic515/testaaa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSumic515%2Ftestaaa%5D(%2FSumic515%2Ftestaaa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
319
+ User4444456/KhaterineHammond444456,"---
320
+ license: apache-2.0
321
+ language:
322
+ - ak
323
+ metrics:
324
+ - accuracy
325
+ base_model:
326
+ - microsoft/OmniParser
327
+ new_version: genmo/mochi-1-preview
328
+ pipeline_tag: token-classification
329
+ library_name: bertopic
330
+ ---","{""id"": ""User4444456/KhaterineHammond444456"", ""author"": ""User4444456"", ""sha"": ""e1882eb7f221eb266289b725ddb18320691f57fa"", ""last_modified"": ""2024-11-14 01:23:45+00:00"", ""created_at"": ""2024-11-14 01:21:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""bertopic"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""bertopic"", ""token-classification"", ""ak"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- ak\nlibrary_name: bertopic\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: token-classification\nnew_version: genmo/mochi-1-preview"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-14 01:23:45+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- ak\nlibrary_name: bertopic\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: token-classification\nnew_version: genmo/mochi-1-preview"", ""transformersInfo"": null, ""_id"": ""6735509d728f9dcf371b6b2d"", ""modelId"": ""User4444456/KhaterineHammond444456"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=User4444456/KhaterineHammond444456&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BUser4444456%2FKhaterineHammond444456%5D(%2FUser4444456%2FKhaterineHammond444456)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
331
+ betzabetmilagrosul/roberta-base,"---
332
+ license: apache-2.0
333
+ datasets:
334
+ - Spawning/PD12M
335
+ language:
336
+ - ar
337
+ metrics:
338
+ - brier_score
339
+ base_model:
340
+ - microsoft/OmniParser
341
+ new_version: Qwen/Qwen2.5-Coder-32B-Instruct
342
+ pipeline_tag: translation
343
+ library_name: fairseq
344
+ tags:
345
+ - music
346
+ ---","{""id"": ""betzabetmilagrosul/roberta-base"", ""author"": ""betzabetmilagrosul"", ""sha"": ""cb19004b506ebdd4b1d1dfb7402b1486db1bd999"", ""last_modified"": ""2024-11-14 04:16:25+00:00"", ""created_at"": ""2024-11-14 04:15:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fairseq"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fairseq"", ""music"", ""translation"", ""ar"", ""dataset:Spawning/PD12M"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""translation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- Spawning/PD12M\nlanguage:\n- ar\nlibrary_name: fairseq\nlicense: apache-2.0\nmetrics:\n- brier_score\npipeline_tag: translation\ntags:\n- music\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""widget_data"": [{""text"": ""\u0625\u0633\u0645\u064a \u0645\u062d\u0645\u062f \u0648\u0623\u0633\u0643\u0646 \u0641\u064a \u0628\u0631\u0644\u064a\u0646""}, {""text"": ""\u0625\u0633\u0645\u064a \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064a \u0644\u0646\u062f\u0646""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-14 04:16:25+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- Spawning/PD12M\nlanguage:\n- ar\nlibrary_name: fairseq\nlicense: apache-2.0\nmetrics:\n- brier_score\npipeline_tag: translation\ntags:\n- music\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""transformersInfo"": null, ""_id"": 
""6735795b7506985f274bc0a2"", ""modelId"": ""betzabetmilagrosul/roberta-base"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=betzabetmilagrosul/roberta-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbetzabetmilagrosul%2Froberta-base%5D(%2Fbetzabetmilagrosul%2Froberta-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
347
+ Ravi1317/Honeybee,"---
348
+ license: apache-2.0
349
+ datasets:
350
+ - fka/awesome-chatgpt-prompts
351
+ metrics:
352
+ - bertscore
353
+ base_model:
354
+ - nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
355
+ - mlx-community/nvidia_Llama-3.1-Nemotron-70B-Instruct-HF_4bit
356
+ - microsoft/OmniParser
357
+ - stabilityai/stable-diffusion-3.5-large
358
+ - tencent/Tencent-Hunyuan-Large
359
+ new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
360
+ pipeline_tag: image-to-video
361
+ library_name: asteroid
362
+ ---","{""id"": ""Ravi1317/Honeybee"", ""author"": ""Ravi1317"", ""sha"": ""8b5c6f0ca7fb48f33bbb036eed5e130bb2b5e1ed"", ""last_modified"": ""2024-11-14 15:27:26+00:00"", ""created_at"": ""2024-11-14 15:23:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""image-to-video"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/Llama-3.1-Nemotron-70B-Instruct-HF\n- mlx-community/nvidia_Llama-3.1-Nemotron-70B-Instruct-HF_4bit\n- microsoft/OmniParser\n- stabilityai/stable-diffusion-3.5-large\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: asteroid\nlicense: apache-2.0\nmetrics:\n- bertscore\npipeline_tag: image-to-video\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-14 15:27:26+00:00"", ""cardData"": ""base_model:\n- nvidia/Llama-3.1-Nemotron-70B-Instruct-HF\n- mlx-community/nvidia_Llama-3.1-Nemotron-70B-Instruct-HF_4bit\n- microsoft/OmniParser\n- stabilityai/stable-diffusion-3.5-large\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: asteroid\nlicense: apache-2.0\nmetrics:\n- bertscore\npipeline_tag: image-to-video\nnew_version: 
nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""673615ea6375fff559c2c9b1"", ""modelId"": ""Ravi1317/Honeybee"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Ravi1317/Honeybee&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BRavi1317%2FHoneybee%5D(%2FRavi1317%2FHoneybee)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
363
+ MAKERBOT/BOX_AI_GT3,"---
364
+ license: artistic-2.0
365
+ datasets:
366
+ - microsoft/orca-agentinstruct-1M-v1
367
+ language:
368
+ - am
369
+ - aa
370
+ - ae
371
+ - ak
372
+ - ar
373
+ - av
374
+ - an
375
+ - bi
376
+ - be
377
+ - az
378
+ metrics:
379
+ - bleu
380
+ base_model:
381
+ - microsoft/OmniParser
382
+ new_version: microsoft/OmniParser
383
+ pipeline_tag: text-classification
384
+ library_name: asteroid
385
+ tags:
386
+ - code
387
+ - chemistry
388
+ - biology
389
+ - music
390
+ ---","{""id"": ""MAKERBOT/BOX_AI_GT3"", ""author"": ""MAKERBOT"", ""sha"": ""5dfc7e66b603e6c8f0fd47fee86baaca378ea54c"", ""last_modified"": ""2024-11-16 11:10:59+00:00"", ""created_at"": ""2024-11-15 20:24:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""code"", ""chemistry"", ""biology"", ""music"", ""text-classification"", ""am"", ""aa"", ""ae"", ""ak"", ""ar"", ""av"", ""an"", ""bi"", ""be"", ""az"", ""dataset:microsoft/orca-agentinstruct-1M-v1"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:artistic-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- microsoft/orca-agentinstruct-1M-v1\nlanguage:\n- am\n- aa\n- ae\n- ak\n- ar\n- av\n- an\n- bi\n- be\n- az\nlibrary_name: asteroid\nlicense: artistic-2.0\nmetrics:\n- bleu\npipeline_tag: text-classification\ntags:\n- code\n- chemistry\n- biology\n- music\nnew_version: microsoft/OmniParser"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-16 11:10:59+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- microsoft/orca-agentinstruct-1M-v1\nlanguage:\n- am\n- aa\n- ae\n- ak\n- ar\n- av\n- an\n- bi\n- be\n- az\nlibrary_name: asteroid\nlicense: artistic-2.0\nmetrics:\n- bleu\npipeline_tag: text-classification\ntags:\n- code\n- chemistry\n- biology\n- music\nnew_version: 
microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""6737ae0bbb93277ca2a754c3"", ""modelId"": ""MAKERBOT/BOX_AI_GT3"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MAKERBOT/BOX_AI_GT3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMAKERBOT%2FBOX_AI_GT3%5D(%2FMAKERBOT%2FBOX_AI_GT3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
391
+ imjustajellyfish/skibidi,"---
392
+ license: mit
393
+ datasets:
394
+ - neuralwork/arxiver
395
+ language:
396
+ - en
397
+ metrics:
398
+ - bertscore
399
+ base_model:
400
+ - microsoft/OmniParser
401
+ new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
402
+ pipeline_tag: text-generation
403
+ library_name: allennlp
404
+ ---","{""id"": ""imjustajellyfish/skibidi"", ""author"": ""imjustajellyfish"", ""sha"": ""844f123ba7a9cef28bcd1ccc1f83682cac51c364"", ""last_modified"": ""2024-11-16 22:49:08+00:00"", ""created_at"": ""2024-11-16 22:47:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""allennlp"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""allennlp"", ""text-generation"", ""en"", ""dataset:neuralwork/arxiver"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- neuralwork/arxiver\nlanguage:\n- en\nlibrary_name: allennlp\nlicense: mit\nmetrics:\n- bertscore\npipeline_tag: text-generation\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-16 22:49:08+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- neuralwork/arxiver\nlanguage:\n- en\nlibrary_name: allennlp\nlicense: mit\nmetrics:\n- bertscore\npipeline_tag: text-generation\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""67392114f8ab29550bcbe86e"", ""modelId"": ""imjustajellyfish/skibidi"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=imjustajellyfish/skibidi&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimjustajellyfish%2Fskibidi%5D(%2Fimjustajellyfish%2Fskibidi)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
405
+ khushpreet93/khushi,"---
406
+ license: unknown
407
+ language:
408
+ - hi
409
+ base_model:
410
+ - microsoft/OmniParser
411
+ tags:
412
+ - not-for-all-audiences
413
+ ---","{""id"": ""khushpreet93/khushi"", ""author"": ""khushpreet93"", ""sha"": ""c4e4d00830cba05c7e47fd0f80b941d1e2a85650"", ""last_modified"": ""2024-11-17 20:09:30+00:00"", ""created_at"": ""2024-11-17 20:07:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""not-for-all-audiences"", ""hi"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:unknown"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- hi\nlicense: unknown\ntags:\n- not-for-all-audiences"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-17 20:09:30+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- hi\nlicense: unknown\ntags:\n- not-for-all-audiences"", ""transformersInfo"": null, ""_id"": ""673a4d0f5d150a0c7e6c337c"", ""modelId"": ""khushpreet93/khushi"", ""usedStorage"": 0}",1,,0,,0,,0,,0,,0
414
+ kiraleyn/kisa_model,"---
415
+ language:
416
+ - en
417
+ base_model:
418
+ - microsoft/OmniParser
419
+ pipeline_tag: text-classification
420
+ ---","{""id"": ""kiraleyn/kisa_model"", ""author"": ""kiraleyn"", ""sha"": ""cf794a3e44b925fefc0da3ba15c0549b8a0c44fd"", ""last_modified"": ""2024-11-18 18:12:08+00:00"", ""created_at"": ""2024-11-18 16:34:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-classification"", ""en"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\npipeline_tag: text-classification"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-18 18:12:08+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\npipeline_tag: text-classification"", ""transformersInfo"": null, ""_id"": ""673b6ca6775d623b217af1f4"", ""modelId"": ""kiraleyn/kisa_model"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kiraleyn/kisa_model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkiraleyn%2Fkisa_model%5D(%2Fkiraleyn%2Fkisa_model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
421
+ musicissospecial/Evangelista,"---
422
+ base_model:
423
+ - microsoft/OmniParser
424
+ ---","{""id"": ""musicissospecial/Evangelista"", ""author"": ""musicissospecial"", ""sha"": ""ebf1b80521b2b2a38a7eb0a6de7eaae9db4c9b2e"", ""last_modified"": ""2024-11-21 15:25:52+00:00"", ""created_at"": ""2024-11-21 15:25:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-21 15:25:52+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""673f50f1487376b8f75a7188"", ""modelId"": ""musicissospecial/Evangelista"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=musicissospecial/Evangelista&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmusicissospecial%2FEvangelista%5D(%2Fmusicissospecial%2FEvangelista)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
425
+ Jimdonn/Marci,"---
426
+ license: apache-2.0
427
+ language:
428
+ - en
429
+ base_model:
430
+ - microsoft/OmniParser
431
+ ---","{""id"": ""Jimdonn/Marci"", ""author"": ""Jimdonn"", ""sha"": ""d2b22d7e9bce81efde8279ae34cddb28ccf430e9"", ""last_modified"": ""2024-11-21 22:20:10+00:00"", ""created_at"": ""2024-11-21 22:19:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-21 22:20:10+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""673fb1fd8f416fe6a9f396c0"", ""modelId"": ""Jimdonn/Marci"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Jimdonn/Marci&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJimdonn%2FMarci%5D(%2FJimdonn%2FMarci)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
432
+ pixghotbi/news,"---
433
+ license: cc-by-nc-nd-4.0
434
+ datasets:
435
+ - fka/awesome-chatgpt-prompts
436
+ base_model:
437
+ - microsoft/OmniParser
438
+ new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
439
+ pipeline_tag: text-generation
440
+ library_name: fastai
441
+ tags:
442
+ - art
443
+ ---
444
+ # Model Card for Model ID
445
+
446
+ <!-- Provide a quick summary of what the model is/does. -->
447
+
448
+ This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
449
+
450
+ ## Model Details
451
+
452
+ ### Model Description
453
+
454
+ <!-- Provide a longer summary of what this model is. -->
455
+
456
+
457
+
458
+ - **Developed by:** [More Information Needed]
459
+ - **Funded by [optional]:** [More Information Needed]
460
+ - **Shared by [optional]:** [More Information Needed]
461
+ - **Model type:** [More Information Needed]
462
+ - **Language(s) (NLP):** [More Information Needed]
463
+ - **License:** [More Information Needed]
464
+ - **Finetuned from model [optional]:** [More Information Needed]
465
+
466
+ ### Model Sources [optional]
467
+
468
+ <!-- Provide the basic links for the model. -->
469
+
470
+ - **Repository:** [More Information Needed]
471
+ - **Paper [optional]:** [More Information Needed]
472
+ - **Demo [optional]:** [More Information Needed]
473
+
474
+ ## Uses
475
+
476
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
477
+
478
+ ### Direct Use
479
+
480
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
481
+
482
+ [More Information Needed]
483
+
484
+ ### Downstream Use [optional]
485
+
486
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
487
+
488
+ [More Information Needed]
489
+
490
+ ### Out-of-Scope Use
491
+
492
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
493
+
494
+ [More Information Needed]
495
+
496
+ ## Bias, Risks, and Limitations
497
+
498
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
499
+
500
+ [More Information Needed]
501
+
502
+ ### Recommendations
503
+
504
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
505
+
506
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
507
+
508
+ ## How to Get Started with the Model
509
+
510
+ Use the code below to get started with the model.
511
+
512
+ [More Information Needed]
513
+
514
+ ## Training Details
515
+
516
+ ### Training Data
517
+
518
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
519
+
520
+ [More Information Needed]
521
+
522
+ ### Training Procedure
523
+
524
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
525
+
526
+ #### Preprocessing [optional]
527
+
528
+ [More Information Needed]
529
+
530
+
531
+ #### Training Hyperparameters
532
+
533
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
534
+
535
+ #### Speeds, Sizes, Times [optional]
536
+
537
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
538
+
539
+ [More Information Needed]
540
+
541
+ ## Evaluation
542
+
543
+ <!-- This section describes the evaluation protocols and provides the results. -->
544
+
545
+ ### Testing Data, Factors & Metrics
546
+
547
+ #### Testing Data
548
+
549
+ <!-- This should link to a Dataset Card if possible. -->
550
+
551
+ [More Information Needed]
552
+
553
+ #### Factors
554
+
555
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
556
+
557
+ [More Information Needed]
558
+
559
+ #### Metrics
560
+
561
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
562
+
563
+ [More Information Needed]
564
+
565
+ ### Results
566
+
567
+ [More Information Needed]
568
+
569
+ #### Summary
570
+
571
+
572
+
573
+ ## Model Examination [optional]
574
+
575
+ <!-- Relevant interpretability work for the model goes here -->
576
+
577
+ [More Information Needed]
578
+
579
+ ## Environmental Impact
580
+
581
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
582
+
583
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
584
+
585
+ - **Hardware Type:** [More Information Needed]
586
+ - **Hours used:** [More Information Needed]
587
+ - **Cloud Provider:** [More Information Needed]
588
+ - **Compute Region:** [More Information Needed]
589
+ - **Carbon Emitted:** [More Information Needed]
590
+
591
+ ## Technical Specifications [optional]
592
+
593
+ ### Model Architecture and Objective
594
+
595
+ [More Information Needed]
596
+
597
+ ### Compute Infrastructure
598
+
599
+ [More Information Needed]
600
+
601
+ #### Hardware
602
+
603
+ [More Information Needed]
604
+
605
+ #### Software
606
+
607
+ [More Information Needed]
608
+
609
+ ## Citation [optional]
610
+
611
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
612
+
613
+ **BibTeX:**
614
+
615
+ [More Information Needed]
616
+
617
+ **APA:**
618
+
619
+ [More Information Needed]
620
+
621
+ ## Glossary [optional]
622
+
623
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
624
+
625
+ [More Information Needed]
626
+
627
+ ## More Information [optional]
628
+
629
+ [More Information Needed]
630
+
631
+ ## Model Card Authors [optional]
632
+
633
+ [More Information Needed]
634
+
635
+ ## Model Card Contact
636
+
637
+ [More Information Needed]","{""id"": ""pixghotbi/news"", ""author"": ""pixghotbi"", ""sha"": ""52f52afaa32aed4292353a863185bc6d86a3e8c4"", ""last_modified"": ""2024-11-24 10:51:12+00:00"", ""created_at"": ""2024-11-24 09:06:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fastai"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fastai"", ""art"", ""text-generation"", ""dataset:fka/awesome-chatgpt-prompts"", ""arxiv:1910.09700"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:cc-by-nc-nd-4.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fastai\nlicense: cc-by-nc-nd-4.0\npipeline_tag: text-generation\ntags:\n- art\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-24 10:51:12+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fastai\nlicense: cc-by-nc-nd-4.0\npipeline_tag: text-generation\ntags:\n- art\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""6742ecac33e10fc6d4b7ea32"", ""modelId"": ""pixghotbi/news"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=pixghotbi/news&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpixghotbi%2Fnews%5D(%2Fpixghotbi%2Fnews)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
638
+ tayir55922/Gem,"---
639
+ license: apache-2.0
640
+ datasets:
641
+ - fka/awesome-chatgpt-prompts
642
+ language:
643
+ - en
644
+ metrics:
645
+ - code_eval
646
+ base_model:
647
+ - microsoft/OmniParser
648
+ new_version: microsoft/OmniParser
649
+ pipeline_tag: text-classification
650
+ library_name: fasttext
651
+ ---","{""id"": ""tayir55922/Gem"", ""author"": ""tayir55922"", ""sha"": ""6741c3ebbae8b91b8ff0fbf1fa6866e6dc9c7064"", ""last_modified"": ""2024-11-24 18:47:17+00:00"", ""created_at"": ""2024-11-24 18:26:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""text-classification"", ""en"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: apache-2.0\nmetrics:\n- code_eval\npipeline_tag: text-classification\nnew_version: microsoft/OmniParser"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-24 18:47:17+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: apache-2.0\nmetrics:\n- code_eval\npipeline_tag: text-classification\nnew_version: microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""67436fe76efe2b931fcb1d20"", ""modelId"": ""tayir55922/Gem"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=tayir55922/Gem&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btayir55922%2FGem%5D(%2Ftayir55922%2FGem)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
652
+ Spookiedongo/Flame,"---
653
+ license: apache-2.0
654
+ datasets:
655
+ - fka/awesome-chatgpt-prompts
656
+ language:
657
+ - ab
658
+ base_model:
659
+ - microsoft/OmniParser
660
+ ---","{""id"": ""Spookiedongo/Flame"", ""author"": ""Spookiedongo"", ""sha"": ""efb6335d3ef1bbef27286910582a295c6d7c4acf"", ""last_modified"": ""2024-11-24 21:33:29+00:00"", ""created_at"": ""2024-11-24 21:32:52+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""ab"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""doi:10.57967/hf/3721"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ab\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-24 21:33:29+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ab\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""67439b845843b8e3fa78b725"", ""modelId"": ""Spookiedongo/Flame"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Spookiedongo/Flame&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSpookiedongo%2FFlame%5D(%2FSpookiedongo%2FFlame)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
661
+ Rezwan9463/Janani_AI,"---
662
+ license: apache-2.0
663
+ datasets:
664
+ - Rezwan9463/Janani_AI
665
+ base_model:
666
+ - microsoft/OmniParser
667
+ new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
668
+ library_name: fastai
669
+ language:
670
+ - bn
671
+ - en
672
+ ---","{""id"": ""Rezwan9463/Janani_AI"", ""author"": ""Rezwan9463"", ""sha"": ""38bee2a90d4d0e0e3edfcbf66b35bb674f9687b4"", ""last_modified"": ""2024-11-27 19:19:58+00:00"", ""created_at"": ""2024-11-27 19:10:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fastai"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fastai"", ""bn"", ""en"", ""dataset:Rezwan9463/Janani_AI"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- Rezwan9463/Janani_AI\nlanguage:\n- bn\n- en\nlibrary_name: fastai\nlicense: apache-2.0\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-27 19:19:58+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- Rezwan9463/Janani_AI\nlanguage:\n- bn\n- en\nlibrary_name: fastai\nlicense: apache-2.0\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""67476e98f5d61d1792beaefd"", ""modelId"": ""Rezwan9463/Janani_AI"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Rezwan9463/Janani_AI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BRezwan9463%2FJanani_AI%5D(%2FRezwan9463%2FJanani_AI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
673
+ rororo2002/TuluPoc,"---
674
+ license: apache-2.0
675
+ base_model:
676
+ - microsoft/OmniParser
677
+ ---","{""id"": ""rororo2002/TuluPoc"", ""author"": ""rororo2002"", ""sha"": ""bd20acbced4dd20aba721e218d0c684ca72025aa"", ""last_modified"": ""2024-11-28 22:23:45+00:00"", ""created_at"": ""2024-11-28 22:22:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-28 22:23:45+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""6748ed1a4431ba7184fadb42"", ""modelId"": ""rororo2002/TuluPoc"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=rororo2002/TuluPoc&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brororo2002%2FTuluPoc%5D(%2Frororo2002%2FTuluPoc)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
678
+ mrrush404/TestSubject,"---
679
+ datasets:
680
+ - HuggingFaceTB/smoltalk
681
+ metrics:
682
+ - character
683
+ base_model:
684
+ - microsoft/OmniParser
685
+ new_version: stabilityai/stable-diffusion-3.5-large
686
+ library_name: fasttext
687
+ ---","{""id"": ""mrrush404/TestSubject"", ""author"": ""mrrush404"", ""sha"": ""34173199f5fefa023fd13728374b5c2de21efee9"", ""last_modified"": ""2024-12-01 10:48:27+00:00"", ""created_at"": ""2024-12-01 10:46:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""dataset:HuggingFaceTB/smoltalk"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- HuggingFaceTB/smoltalk\nlibrary_name: fasttext\nmetrics:\n- character\nnew_version: stabilityai/stable-diffusion-3.5-large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-01 10:48:27+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- HuggingFaceTB/smoltalk\nlibrary_name: fasttext\nmetrics:\n- character\nnew_version: stabilityai/stable-diffusion-3.5-large"", ""transformersInfo"": null, ""_id"": ""674c3e9b8472d421eec5fde7"", ""modelId"": ""mrrush404/TestSubject"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mrrush404/TestSubject&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmrrush404%2FTestSubject%5D(%2Fmrrush404%2FTestSubject)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
688
+ Auremaya/test2,"---
689
+ license: cc-by-4.0
690
+ language:
691
+ - fr
692
+ base_model:
693
+ - microsoft/OmniParser
694
+ ---","{""id"": ""Auremaya/test2"", ""author"": ""Auremaya"", ""sha"": ""a178288139512e8b97a9cf0130bb581a754aacdd"", ""last_modified"": ""2024-12-02 09:34:36+00:00"", ""created_at"": ""2024-12-02 09:28:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fr"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:cc-by-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- fr\nlicense: cc-by-4.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 09:34:36+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- fr\nlicense: cc-by-4.0"", ""transformersInfo"": null, ""_id"": ""674d7da1acf77535c65b4fec"", ""modelId"": ""Auremaya/test2"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Auremaya/test2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAuremaya%2Ftest2%5D(%2FAuremaya%2Ftest2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
695
+ ChloeAndRose/Chloe,"---
696
+ license: afl-3.0
697
+ datasets:
698
+ - HuggingFaceTB/smoltalk
699
+ language:
700
+ - en
701
+ metrics:
702
+ - accuracy
703
+ base_model:
704
+ - microsoft/OmniParser
705
+ new_version: microsoft/OmniParser
706
+ pipeline_tag: text-generation
707
+ library_name: asteroid
708
+ tags:
709
+ - HighSchool
710
+ - Roleplay
711
+ ---","{""id"": ""ChloeAndRose/Chloe"", ""author"": ""ChloeAndRose"", ""sha"": ""8ccf2ab2b82af62cb41154746105bbd823cffb54"", ""last_modified"": ""2024-12-02 14:01:40+00:00"", ""created_at"": ""2024-12-02 13:59:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""HighSchool"", ""Roleplay"", ""text-generation"", ""en"", ""dataset:HuggingFaceTB/smoltalk"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- HuggingFaceTB/smoltalk\nlanguage:\n- en\nlibrary_name: asteroid\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: text-generation\ntags:\n- HighSchool\n- Roleplay\nnew_version: microsoft/OmniParser"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 14:01:40+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- HuggingFaceTB/smoltalk\nlanguage:\n- en\nlibrary_name: asteroid\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: text-generation\ntags:\n- HighSchool\n- Roleplay\nnew_version: microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""674dbd439c20af647fc87014"", ""modelId"": 
""ChloeAndRose/Chloe"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ChloeAndRose/Chloe&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BChloeAndRose%2FChloe%5D(%2FChloeAndRose%2FChloe)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
712
+ Bam34568/Hg,"---
713
+ license: afl-3.0
714
+ datasets:
715
+ - PleIAs/common_corpus
716
+ language:
717
+ - av
718
+ metrics:
719
+ - bleu
720
+ base_model:
721
+ - microsoft/OmniParser
722
+ new_version: stabilityai/stable-diffusion-3.5-large
723
+ pipeline_tag: zero-shot-classification
724
+ library_name: diffusers
725
+ ---","{""id"": ""Bam34568/Hg"", ""author"": ""Bam34568"", ""sha"": ""027c78fcd4054cc50124c8632fe9ab85f5fe4871"", ""last_modified"": ""2024-12-02 18:00:39+00:00"", ""created_at"": ""2024-12-02 17:58:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""zero-shot-classification"", ""av"", ""dataset:PleIAs/common_corpus"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": ""zero-shot-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- PleIAs/common_corpus\nlanguage:\n- av\nlibrary_name: diffusers\nlicense: afl-3.0\nmetrics:\n- bleu\npipeline_tag: zero-shot-classification\nnew_version: stabilityai/stable-diffusion-3.5-large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 18:00:39+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- PleIAs/common_corpus\nlanguage:\n- av\nlibrary_name: diffusers\nlicense: afl-3.0\nmetrics:\n- bleu\npipeline_tag: zero-shot-classification\nnew_version: stabilityai/stable-diffusion-3.5-large"", ""transformersInfo"": null, ""_id"": ""674df53e13d4ba0211335ed2"", ""modelId"": ""Bam34568/Hg"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Bam34568/Hg&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBam34568%2FHg%5D(%2FBam34568%2FHg)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
726
+ Noufy/sms_url_phishing,"---
727
+ license: apache-2.0
728
+ language:
729
+ - en
730
+ - ar
731
+ pipeline_tag: text-classification
732
+ tags:
733
+ - spam
734
+ - phishing
735
+ - anaylisis
736
+ - sms
737
+ - climate
738
+ - code
739
+ library_name: espnet
740
+ metrics:
741
+ - bleu
742
+ - accuracy
743
+ base_model:
744
+ - microsoft/OmniParser
745
+ ---
746
+ # Model Card for Model ID
747
+
748
+ <!-- Provide a quick summary of what the model is/does. -->
749
+
750
+ This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
751
+
752
+ ## Model Details
753
+
754
+ ### Model Description
755
+
756
+ <!-- Provide a longer summary of what this model is. -->
757
+
758
+
759
+
760
+ - **Developed by:** [More Information Needed]
761
+ - **Funded by [optional]:** [More Information Needed]
762
+ - **Shared by [optional]:** [More Information Needed]
763
+ - **Model type:** [More Information Needed]
764
+ - **Language(s) (NLP):** [More Information Needed]
765
+ - **License:** [More Information Needed]
766
+ - **Finetuned from model [optional]:** [More Information Needed]
767
+
768
+ ### Model Sources [optional]
769
+
770
+ <!-- Provide the basic links for the model. -->
771
+
772
+ - **Repository:** [More Information Needed]
773
+ - **Paper [optional]:** [More Information Needed]
774
+ - **Demo [optional]:** [More Information Needed]
775
+
776
+ ## Uses
777
+
778
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
779
+
780
+ ### Direct Use
781
+
782
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
783
+
784
+ [More Information Needed]
785
+
786
+ ### Downstream Use [optional]
787
+
788
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
789
+
790
+ [More Information Needed]
791
+
792
+ ### Out-of-Scope Use
793
+
794
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
795
+
796
+ [More Information Needed]
797
+
798
+ ## Bias, Risks, and Limitations
799
+
800
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
801
+
802
+ [More Information Needed]
803
+
804
+ ### Recommendations
805
+
806
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
807
+
808
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
809
+
810
+ ## How to Get Started with the Model
811
+
812
+ Use the code below to get started with the model.
813
+
814
+ [More Information Needed]
815
+
816
+ ## Training Details
817
+
818
+ ### Training Data
819
+
820
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
821
+
822
+ [More Information Needed]
823
+
824
+ ### Training Procedure
825
+
826
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
827
+
828
+ #### Preprocessing [optional]
829
+
830
+ [More Information Needed]
831
+
832
+
833
+ #### Training Hyperparameters
834
+
835
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
836
+
837
+ #### Speeds, Sizes, Times [optional]
838
+
839
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
840
+
841
+ [More Information Needed]
842
+
843
+ ## Evaluation
844
+
845
+ <!-- This section describes the evaluation protocols and provides the results. -->
846
+
847
+ ### Testing Data, Factors & Metrics
848
+
849
+ #### Testing Data
850
+
851
+ <!-- This should link to a Dataset Card if possible. -->
852
+
853
+ [More Information Needed]
854
+
855
+ #### Factors
856
+
857
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
858
+
859
+ [More Information Needed]
860
+
861
+ #### Metrics
862
+
863
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
864
+
865
+ [More Information Needed]
866
+
867
+ ### Results
868
+
869
+ [More Information Needed]
870
+
871
+ #### Summary
872
+
873
+
874
+
875
+ ## Model Examination [optional]
876
+
877
+ <!-- Relevant interpretability work for the model goes here -->
878
+
879
+ [More Information Needed]
880
+
881
+ ## Environmental Impact
882
+
883
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
884
+
885
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
886
+
887
+ - **Hardware Type:** [More Information Needed]
888
+ - **Hours used:** [More Information Needed]
889
+ - **Cloud Provider:** [More Information Needed]
890
+ - **Compute Region:** [More Information Needed]
891
+ - **Carbon Emitted:** [More Information Needed]
892
+
893
+ ## Technical Specifications [optional]
894
+
895
+ ### Model Architecture and Objective
896
+
897
+ [More Information Needed]
898
+
899
+ ### Compute Infrastructure
900
+
901
+ [More Information Needed]
902
+
903
+ #### Hardware
904
+
905
+ [More Information Needed]
906
+
907
+ #### Software
908
+
909
+ [More Information Needed]
910
+
911
+ ## Citation [optional]
912
+
913
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
914
+
915
+ **BibTeX:**
916
+
917
+ [More Information Needed]
918
+
919
+ **APA:**
920
+
921
+ [More Information Needed]
922
+
923
+ ## Glossary [optional]
924
+
925
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
926
+
927
+ [More Information Needed]
928
+
929
+ ## More Information [optional]
930
+
931
+ [More Information Needed]
932
+
933
+ ## Model Card Authors [optional]
934
+
935
+ [More Information Needed]
936
+
937
+ ## Model Card Contact
938
+
939
+ [More Information Needed]","{""id"": ""Noufy/sms_url_phishing"", ""author"": ""Noufy"", ""sha"": ""56242af3dca16030843660d8b6c5ddaa97337a55"", ""last_modified"": ""2024-12-02 18:54:22+00:00"", ""created_at"": ""2024-12-02 18:22:20+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""espnet"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""espnet"", ""spam"", ""phishing"", ""anaylisis"", ""sms"", ""climate"", ""code"", ""text-classification"", ""en"", ""ar"", ""arxiv:1910.09700"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\n- ar\nlibrary_name: espnet\nlicense: apache-2.0\nmetrics:\n- bleu\n- accuracy\npipeline_tag: text-classification\ntags:\n- spam\n- phishing\n- anaylisis\n- sms\n- climate\n- code"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NavyBayes.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 18:54:22+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\nlanguage:\n- en\n- ar\nlibrary_name: espnet\nlicense: apache-2.0\nmetrics:\n- bleu\n- accuracy\npipeline_tag: text-classification\ntags:\n- spam\n- phishing\n- anaylisis\n- sms\n- climate\n- code"", ""transformersInfo"": null, ""_id"": ""674dfadc15c6cf5d0061d8f3"", ""modelId"": ""Noufy/sms_url_phishing"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/139,1
940
+ Test11223344556677889900/Test-AI,"---
941
+ license: afl-3.0
942
+ datasets:
943
+ - microsoft/orca-agentinstruct-1M-v1
944
+ language:
945
+ - en
946
+ metrics:
947
+ - accuracy
948
+ - code_eval
949
+ base_model:
950
+ - microsoft/OmniParser
951
+ new_version: microsoft/OmniParser
952
+ library_name: fasttext
953
+ ---","{""id"": ""Test11223344556677889900/Test-AI"", ""author"": ""Test11223344556677889900"", ""sha"": ""32c2fec71b77e96093782a1b2520b0f4ac7780e4"", ""last_modified"": ""2024-12-04 05:37:23+00:00"", ""created_at"": ""2024-12-04 05:34:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""en"", ""dataset:microsoft/orca-agentinstruct-1M-v1"", ""base_model:microsoft/OmniParser"", ""base_model:finetune:microsoft/OmniParser"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- microsoft/orca-agentinstruct-1M-v1\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: afl-3.0\nmetrics:\n- accuracy\n- code_eval\nnew_version: microsoft/OmniParser"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-04 05:37:23+00:00"", ""cardData"": ""base_model:\n- microsoft/OmniParser\ndatasets:\n- microsoft/orca-agentinstruct-1M-v1\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: afl-3.0\nmetrics:\n- accuracy\n- code_eval\nnew_version: microsoft/OmniParser"", ""transformersInfo"": null, ""_id"": ""674fe9e953b0d0d0f01a8170"", ""modelId"": ""Test11223344556677889900/Test-AI"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Test11223344556677889900/Test-AI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTest11223344556677889900%2FTest-AI%5D(%2FTest11223344556677889900%2FTest-AI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
OpenCoder-8B-Instruct_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ infly/OpenCoder-8B-Instruct,"---
3
+ license: other
4
+ license_name: inf
5
+ license_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE
6
+ language:
7
+ - en
8
+ - zh
9
+ base_model:
10
+ - infly/OpenCoder-8B-Base
11
+ pipeline_tag: text-generation
12
+ library_name: transformers
13
+ datasets:
14
+ - OpenCoder-LLM/opencoder-sft-stage1
15
+ - OpenCoder-LLM/opencoder-sft-stage2
16
+ ---
17
+
18
+
19
+
20
+ <div align=""center"">
21
+ <img src=""https://github.com/OpenCoder-llm/opencoder-llm.github.io/blob/main/static/images/opencoder_icon.jpg?raw=true"" width=""50%"" alt=""OpenCoder-Icon"" />
22
+ </div>
23
+
24
+
25
+
26
+ <p align=""center"">
27
+ <!-- <a href=""https://arxiv.org/pdf/2411.04905""><b>Paper Link</b>👁️</a> -->
28
+ 🏠 <a href=""https://opencoder-llm.github.io/"">Home Page</a>&nbsp&nbsp |
29
+ &nbsp&nbsp 🤗 <a href=""https://huggingface.co/collections/infly/opencoder-672cec44bbb86c39910fb55e"">Model</a>&nbsp&nbsp |
30
+ &nbsp&nbsp 📊 <a href=""https://huggingface.co/collections/OpenCoder-LLM/opencoder-datasets-672e6db6a0fed24bd69ef1c2"">Dataset</a>&nbsp&nbsp |
31
+ &nbsp&nbsp 📄<a href=""https://arxiv.org/abs/2411.04905"">Paper</a>&nbsp&nbsp |
32
+ &nbsp&nbsp 🚀<a href=""https://huggingface.co/spaces/OpenCoder-LLM/OpenCoder-8B-Instruct"">Demo</a>&nbsp&nbsp
33
+ </p>
34
+
35
+
36
+ ## 1. Introduction
37
+
38
+ **OpenCoder** is an open and reproducible code LLM family which includes 1.5B and 8B base and chat models, supporting both English and Chinese languages. Starting from scratch, OpenCoder is pretrained on 2.5 trillion tokens composed of 90% raw code and 10% code-related web data, and supervised finetuned on over 4.5M high-quality SFT examples, finally reaching the performance of top-tier code LLMs. We provide not only model weights and inference code, but also the reproducible training data, the complete data processing pipeline, rigorous experimental ablation results, and detailed training protocols. Empowering researchers to build and innovate, OpenCoder is your open foundation for advancing code AI.
39
+
40
+ - **Complete Open Source**: OpenCoder ensures full transparency by releasing not only the model weights and forthcoming inference code but also the complete data-cleaning code for training. This release includes high-quality synthetic data, an extensive set of checkpoints, and a dataset of over 4.5 million supervised fine-tuning (SFT) entries, making OpenCoder one of the most comprehensively open-sourced models available.
41
+ - **Comprehensive Experimental Analysis**: OpenCoder is rigorously tested through extensive ablation studies on various data-cleaning strategies and training processes, including file-level and repository-level deduplication experiments, ensuring thorough exploration and validation of the model’s performance.
42
+ - **High-Quality Synthetic Data**: OpenCoder provides a fully developed synthetic data generation process and over 4.5 million SFT data entries, establishing a robust data foundation for model training and evaluation.
43
+ - **Exceptional Performance**: OpenCoder achieves high performance across multiple language model benchmarks, positioning it among the leading open-source models for code.
44
+
45
+
46
+ ## 2. Models
47
+
48
+ | Model | Sequence Length | Download |
49
+ |:---------------------:|:---------------:|:-----------------------------------------------------------------------:|
50
+ | OpenCoder-1.5B-Base | 4K | 🤗 [HuggingFace](https://huggingface.co/infly/OpenCoder-1.5B-Base) |
51
+ | OpenCoder-8B-Base | 8K | 🤗 [HuggingFace](https://huggingface.co/infly/OpenCoder-8B-Base) |
52
+ | OpenCoder-1.5B-Instruct | 4K | 🤗 [HuggingFace](https://huggingface.co/infly/OpenCoder-1.5B-Instruct) |
53
+ | OpenCoder-8B-Instruct | 8K | 🤗 [HuggingFace](https://huggingface.co/infly/OpenCoder-8B-Instruct) |
54
+
55
+ ## 3. Datasets
56
+
57
+ ### Pre-training
58
+
59
+ | Dataset | Size | Download |
60
+ |:---------------------:|:---------------:|:-----------------------------------------------------------------------:|
61
+ | fineweb-code-corpus | 148 GB | 🤗 [HuggingFace](https://huggingface.co/datasets/OpenCoder-LLM/fineweb-code-corpus) |
62
+ | fineweb-math-corpus | 10 GB | 🤗 [HuggingFace](https://huggingface.co/datasets/OpenCoder-LLM/fineweb-math-corpus) |
63
+
64
+
65
+ ### Post-training
66
+
67
+ | Dataset | Num | Download |
68
+ |:---------------------:|:---------------:|:-----------------------------------------------------------------------:|
69
+ | opencoder-sft-stage1 | 4.21 M | 🤗 [HuggingFace](https://huggingface.co/datasets/OpenCoder-LLM/opencoder-sft-stage1) |
70
+ | opencoder-sft-stage2 | 375 K | 🤗 [HuggingFace](https://huggingface.co/datasets/OpenCoder-LLM/opencoder-sft-stage2) |
71
+
72
+ **This is not the end; we are organizing the remaining data and uploading it progressively.**
73
+
74
+
75
+ ## 4. Benchmarks
76
+
77
+ **Note:** For the detailed evaluation results, please refer to [our paper](https://arxiv.org/pdf/2411.04905).
78
+
79
+ <!-- ### Base Model -->
80
+ <!-- | model | OpenCoder-1.5B-Base | OpenCoder-8B-Base |
81
+ |:---------------:|:-------------:|:------------:|
82
+ | HumanEval(+) | 54.3 (49.4) | 66.5 (63.4) |
83
+ | MBPP(+) | 70.6 (58.7) | 79.9 (70.4) |
84
+ | BigCodeBench | 24.5 | 40.5 |
85
+ | BigCodeBench-Hard | 5.4 | 9.5 | -->
86
+
87
+
88
+ <!-- ### Chat Model -->
89
+ | model | OpenCoder-1.5B-Instruct | OpenCoder-8B-Instruct |
90
+ |:---------------:|:-------------:|:------------:|
91
+ | HumanEval(+) | 72.5 (67.7) | 83.5 (78.7) |
92
+ | MBPP(+) | 72.7 (61.9) | 79.1 (69.0) |
93
+ | BigCodeBench | 33.3 | 40.3 |
94
+ | BigCodeBench-Hard | 11.5 | 16.9 |
95
+ | LiveCodeBench | 12.8 | 23.2 |
96
+ | MultiPL-E (AVG) | 57.5 | 71.0 |
97
+
98
+
99
+ ## 5. Inference
100
+
101
+ ### Inference with Huggingface's Transformers
102
+
103
+ ```python
104
+ import torch
105
+ from transformers import AutoTokenizer, AutoModelForCausalLM
106
+
107
+ model_name = ""infly/OpenCoder-8B-Instruct""
108
+ model = AutoModelForCausalLM.from_pretrained(model_name,
109
+ torch_dtype=torch.bfloat16,
110
+ device_map=""auto"",
111
+ trust_remote_code=True)
112
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
113
+
114
+ messages=[
115
+ { 'role': 'user', 'content': ""write a quick sort algorithm in python.""}
116
+ ]
117
+
118
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
119
+
120
+ outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
121
+
122
+ result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
123
+ print(result)
124
+ ```
125
+
126
+ <!-- ### Inference with vLLM (recommended) -->
127
+
128
+ ## 6. License
129
+
130
+ OpenCoder series (including Base and Chat) support commercial applications under a permissive [License](https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE).
131
+
132
+ ## 7. Citation
133
+ ```
134
+ @inproceedings{Huang2024OpenCoderTO,
135
+ title={OpenCoder: The Open Cookbook for Top-Tier Code Large Language Models},
136
+ author={Siming Huang and Tianhao Cheng and Jason Klein Liu and Jiaran Hao and Liuyihan Song and Yang Xu and J. Yang and J. H. Liu and Chenchen Zhang and Linzheng Chai and Ruifeng Yuan and Zhaoxiang Zhang and Jie Fu and Qian Liu and Ge Zhang and Zili Wang and Yuan Qi and Yinghui Xu and Wei Chu},
137
+ year={2024},
138
+ url={https://arxiv.org/pdf/2411.04905}
139
+ }
140
+ ```","{""id"": ""infly/OpenCoder-8B-Instruct"", ""author"": ""infly"", ""sha"": ""01badbbf10c2dfd7e2a0b5f570065ef44548576c"", ""last_modified"": ""2024-11-14 05:52:08+00:00"", ""created_at"": ""2024-11-07 16:23:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 658447, ""downloads_all_time"": null, ""likes"": 189, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""conversational"", ""en"", ""zh"", ""dataset:OpenCoder-LLM/opencoder-sft-stage1"", ""dataset:OpenCoder-LLM/opencoder-sft-stage2"", ""arxiv:2411.04905"", ""base_model:infly/OpenCoder-8B-Base"", ""base_model:finetune:infly/OpenCoder-8B-Base"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- infly/OpenCoder-8B-Base\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: other\nlicense_name: inf\nlicense_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|im_start|>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are OpenCoder, created by OpenCoder Team.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + 
'<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_inflm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""OpenCoder-LLM/OpenCoder-8B-Instruct"", ""HPAI-BSC/TuRTLe-Leaderboard"", ""Zenithwang/infly-OpenCoder-8B-Instruct"", ""plusevplay/infly-OpenCoder-8B-Instruct"", ""Mischa88/infly-OpenCoder-8B-Instruct"", ""kkasemos/experiments"", ""argakiig/infly-OpenCoder-8B-Instruct"", 
""cybtek/infly-OpenCoder-8B-Instruct"", ""Olppara/infly-OpenCoder-8B-Instruct"", ""MegaTronX/OpenCoder""], ""safetensors"": {""parameters"": {""BF16"": 7771262976}, ""total"": 7771262976}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-14 05:52:08+00:00"", ""cardData"": ""base_model:\n- infly/OpenCoder-8B-Base\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: other\nlicense_name: inf\nlicense_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""672ce9724e48ac74eeaf9121"", ""modelId"": ""infly/OpenCoder-8B-Instruct"", ""usedStorage"": 15544095541}",0,"https://huggingface.co/huihui-ai/OpenCoder-8B-Instruct-abliterated, https://huggingface.co/AIFunOver/OpenCoder-8B-Instruct-openvino-fp16",2,,0,"https://huggingface.co/mradermacher/OpenCoder-8B-Instruct-GGUF, https://huggingface.co/AIFunOver/OpenCoder-8B-Instruct-openvino-4bit, https://huggingface.co/KnutJaegersberg/OpenCoder-8B-Instruct-Q8_0-GGUF, https://huggingface.co/NikolayKozloff/OpenCoder-8B-Instruct-Q8_0-GGUF, https://huggingface.co/NikolayKozloff/OpenCoder-8B-Instruct-Q6_K-GGUF, https://huggingface.co/NikolayKozloff/OpenCoder-8B-Instruct-Q5_K_M-GGUF, https://huggingface.co/NikolayKozloff/OpenCoder-8B-Instruct-Q5_K_S-GGUF, https://huggingface.co/mradermacher/OpenCoder-8B-Instruct-i1-GGUF, https://huggingface.co/PrunaAI/infly-OpenCoder-8B-Instruct-bnb-smashed, https://huggingface.co/nautilie7/OpenCoder-8B-Instruct-Q8_0-GGUF, https://huggingface.co/DevQuasar/infly.OpenCoder-8B-Instruct-GGUF, https://huggingface.co/gardner/OpenCoder-8B-Instruct-Q4_K_M-GGUF, https://huggingface.co/bartowski/OpenCoder-8B-Instruct-GGUF, 
https://huggingface.co/lmstudio-community/OpenCoder-8B-Instruct-GGUF, https://huggingface.co/Orion-zhen/OpenCoder-8B-Instruct-AWQ, https://huggingface.co/AIFunOver/OpenCoder-8B-Instruct-openvino-8bit, https://huggingface.co/BitSnake88/OpenCoder-8B-Instruct-Q4_K_M-GGUF, https://huggingface.co/Aashraf995/OpenCoder-8B-Instruct-Q4_K_M-GGUF, https://huggingface.co/Aashraf995/OpenCoder-8B-Instruct-Q8_0-GGUF, https://huggingface.co/performanceoptician/OpenCoder-8B-Instruct-IQ3_XXS-GGUF, https://huggingface.co/tensorblock/OpenCoder-8B-Instruct-GGUF, https://huggingface.co/itlwas/OpenCoder-8B-Instruct-Q4_K_M-GGUF, https://huggingface.co/onekq-ai/OpenCoder-8B-Instruct-bnb-4bit",23,,0,"HPAI-BSC/TuRTLe-Leaderboard, MegaTronX/OpenCoder, Mischa88/infly-OpenCoder-8B-Instruct, Olppara/infly-OpenCoder-8B-Instruct, OpenCoder-LLM/OpenCoder-8B-Instruct, Zenithwang/infly-OpenCoder-8B-Instruct, argakiig/infly-OpenCoder-8B-Instruct, cybtek/infly-OpenCoder-8B-Instruct, huggingface/InferenceSupport/discussions/new?title=infly/OpenCoder-8B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Binfly%2FOpenCoder-8B-Instruct%5D(%2Finfly%2FOpenCoder-8B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kkasemos/experiments, plusevplay/infly-OpenCoder-8B-Instruct",11
141
+ huihui-ai/OpenCoder-8B-Instruct-abliterated,"---
142
+ license: other
143
+ license_name: inf
144
+ license_link: >-
145
+ https://huggingface.co/huihui-ai/OpenCoder-8B-Instruct-abliterated/blob/main/LICENSE
146
+ language:
147
+ - en
148
+ - zh
149
+ base_model:
150
+ - infly/OpenCoder-8B-Instruct
151
+ pipeline_tag: text-generation
152
+ library_name: transformers
153
+ datasets:
154
+ - OpenCoder-LLM/opencoder-sft-stage1
155
+ - OpenCoder-LLM/opencoder-sft-stage2
156
+ tags:
157
+ - abliterated
158
+ - uncensored
159
+ ---
160
+
161
+ # huihui-ai/OpenCoder-8B-Instruct-abliterated
162
+
163
+ This is an uncensored version of [infly/OpenCoder-8B-Instruct](https://huggingface.co/infly/OpenCoder-8B-Instruct) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to know more about it).
164
+
165
+ If the desired result is not achieved, you can clear the conversation and try again.
166
+
167
+ ### Inference with Huggingface's Transformers
168
+
169
+ ```python
170
+ import torch
171
+ from transformers import AutoTokenizer, AutoModelForCausalLM
172
+
173
+ model_name = ""infly/OpenCoder-8B-Instruct-abliterated""
174
+ model = AutoModelForCausalLM.from_pretrained(model_name,
175
+ torch_dtype=torch.bfloat16,
176
+ device_map=""auto"",
177
+ trust_remote_code=True)
178
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
179
+
180
+ messages=[
181
+ { 'role': 'user', 'content': ""write a quick sort algorithm in python.""}
182
+ ]
183
+
184
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
185
+
186
+ outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
187
+
188
+ result = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
189
+ print(result)
190
+ ```","{""id"": ""huihui-ai/OpenCoder-8B-Instruct-abliterated"", ""author"": ""huihui-ai"", ""sha"": ""c3f8b9018ec0091b787cd81d426c7ed4e7e08065"", ""last_modified"": ""2024-11-11 11:46:30+00:00"", ""created_at"": ""2024-11-11 11:11:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""abliterated"", ""uncensored"", ""conversational"", ""en"", ""zh"", ""dataset:OpenCoder-LLM/opencoder-sft-stage1"", ""dataset:OpenCoder-LLM/opencoder-sft-stage2"", ""base_model:infly/OpenCoder-8B-Instruct"", ""base_model:finetune:infly/OpenCoder-8B-Instruct"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- infly/OpenCoder-8B-Instruct\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: other\nlicense_name: inf\nlicense_link: https://huggingface.co/huihui-ai/OpenCoder-8B-Instruct-abliterated/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- abliterated\n- uncensored"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|im_start|>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are OpenCoder, created by OpenCoder 
Team.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_inflm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7771262976}, ""total"": 7771262976}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-11 11:46:30+00:00"", ""cardData"": 
""base_model:\n- infly/OpenCoder-8B-Instruct\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: other\nlicense_name: inf\nlicense_link: https://huggingface.co/huihui-ai/OpenCoder-8B-Instruct-abliterated/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- abliterated\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6731e6564aebf502fa6fe091"", ""modelId"": ""huihui-ai/OpenCoder-8B-Instruct-abliterated"", ""usedStorage"": 15544095541}",1,,0,,0,"https://huggingface.co/Aashraf995/OpenCoder-8B-Instruct-abliterated-Q4_K_M-GGUF, https://huggingface.co/mradermacher/OpenCoder-8B-Instruct-abliterated-GGUF, https://huggingface.co/mradermacher/OpenCoder-8B-Instruct-abliterated-i1-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=huihui-ai/OpenCoder-8B-Instruct-abliterated&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhuihui-ai%2FOpenCoder-8B-Instruct-abliterated%5D(%2Fhuihui-ai%2FOpenCoder-8B-Instruct-abliterated)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
191
+ AIFunOver/OpenCoder-8B-Instruct-openvino-fp16,"---
192
+ base_model: infly/OpenCoder-8B-Instruct
193
+ datasets:
194
+ - OpenCoder-LLM/opencoder-sft-stage1
195
+ - OpenCoder-LLM/opencoder-sft-stage2
196
+ language:
197
+ - en
198
+ - zh
199
+ library_name: transformers
200
+ license: other
201
+ license_name: inf
202
+ license_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE
203
+ pipeline_tag: text-generation
204
+ tags:
205
+ - openvino
206
+ - nncf
207
+ - fp16
208
+ ---
209
+
210
+ This model is a quantized version of [`infly/OpenCoder-8B-Instruct`](https://huggingface.co/infly/OpenCoder-8B-Instruct) and is converted to the OpenVINO format. This model was obtained via the [nncf-quantization](https://huggingface.co/spaces/echarlaix/nncf-quantization) space with [optimum-intel](https://github.com/huggingface/optimum-intel).
211
+ First make sure you have `optimum-intel` installed:
212
+ ```bash
213
+ pip install optimum[openvino]
214
+ ```
215
+ To load your model you can do as follows:
216
+ ```python
217
+ from optimum.intel import OVModelForCausalLM
218
+ model_id = ""AIFunOver/OpenCoder-8B-Instruct-openvino-fp16""
219
+ model = OVModelForCausalLM.from_pretrained(model_id)
220
+ ```
221
+ ","{""id"": ""AIFunOver/OpenCoder-8B-Instruct-openvino-fp16"", ""author"": ""AIFunOver"", ""sha"": ""f79dfb43600ddcb3ed456eb6b750dacddb5c0ce7"", ""last_modified"": ""2024-11-12 07:31:04+00:00"", ""created_at"": ""2024-11-12 06:42:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""openvino"", ""llama"", ""text-generation"", ""nncf"", ""fp16"", ""conversational"", ""en"", ""zh"", ""dataset:OpenCoder-LLM/opencoder-sft-stage1"", ""dataset:OpenCoder-LLM/opencoder-sft-stage2"", ""base_model:infly/OpenCoder-8B-Instruct"", ""base_model:finetune:infly/OpenCoder-8B-Instruct"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: infly/OpenCoder-8B-Instruct\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: other\nlicense_name: inf\nlicense_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- openvino\n- nncf\n- fp16"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|im_start|>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are OpenCoder, created by OpenCoder Team.<|im_end|>\n' }}{% endif 
%}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-12 07:31:04+00:00"", ""cardData"": ""base_model: infly/OpenCoder-8B-Instruct\ndatasets:\n- OpenCoder-LLM/opencoder-sft-stage1\n- OpenCoder-LLM/opencoder-sft-stage2\nlanguage:\n- en\n- zh\nlibrary_name: transformers\nlicense: 
other\nlicense_name: inf\nlicense_link: https://huggingface.co/infly/OpenCoder-8B-Instruct/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- openvino\n- nncf\n- fp16"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6732f8df78e78fe899943f1d"", ""modelId"": ""AIFunOver/OpenCoder-8B-Instruct-openvino-fp16"", ""usedStorage"": 15546133445}",1,,0,,0,,0,,0,"echarlaix/nncf-quantization, huggingface/InferenceSupport/discussions/new?title=AIFunOver/OpenCoder-8B-Instruct-openvino-fp16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAIFunOver%2FOpenCoder-8B-Instruct-openvino-fp16%5D(%2FAIFunOver%2FOpenCoder-8B-Instruct-openvino-fp16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
OpenELM-3B-Instruct_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,798 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ apple/OpenELM-3B-Instruct,"---
3
+ license: apple-amlr
4
+ license_name: apple-sample-code-license
5
+ license_link: LICENSE
6
+ ---
7
+
8
+ # OpenELM
9
+
10
+ *Sachin Mehta, Mohammad Hossein Sekhavat, Qingqing Cao, Maxwell Horton, Yanzi Jin, Chenfan Sun, Iman Mirzadeh, Mahyar Najibi, Dmitry Belenko, Peter Zatloukal, Mohammad Rastegari*
11
+
12
+ We introduce **OpenELM**, a family of **Open** **E**fficient **L**anguage **M**odels. OpenELM uses a layer-wise scaling strategy to efficiently allocate parameters within each layer of the transformer model, leading to enhanced accuracy. We pretrained OpenELM models using the [CoreNet](https://github.com/apple/corenet) library. We release both pretrained and instruction tuned models with 270M, 450M, 1.1B and 3B parameters. We release the complete framework, encompassing data preparation, training, fine-tuning, and evaluation procedures, alongside multiple pre-trained checkpoints and training logs, to facilitate open research.
13
+
14
+ Our pre-training dataset contains RefinedWeb, deduplicated PILE, a subset of RedPajama, and a subset of Dolma v1.6, totaling approximately 1.8 trillion tokens. Please check license agreements and terms of these datasets before using them.
15
+
16
+
17
+
18
+ ## Usage
19
+
20
+ We have provided an example function to generate output from OpenELM models loaded via [HuggingFace Hub](https://huggingface.co/docs/hub/) in `generate_openelm.py`.
21
+
22
+ You can try the model by running the following command:
23
+ ```
24
+ python generate_openelm.py --model apple/OpenELM-3B-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2
25
+ ```
26
+ Please refer to [this link](https://huggingface.co/docs/hub/security-tokens) to obtain your hugging face access token.
27
+
28
+ Additional arguments to the hugging face generate function can be passed via `generate_kwargs`. As an example, to speedup the inference, you can try [lookup token speculative generation](https://huggingface.co/docs/transformers/generation_strategies) by passing the `prompt_lookup_num_tokens` argument as follows:
29
+ ```
30
+ python generate_openelm.py --model apple/OpenELM-3B-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 prompt_lookup_num_tokens=10
31
+ ```
32
+ Alternatively, try model-wise speculative generation with an [assistive model](https://huggingface.co/blog/assisted-generation) by passing a smaller model through the `assistant_model` argument, for example:
33
+ ```
34
+ python generate_openelm.py --model apple/OpenELM-3B-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 --assistant_model [SMALLER_MODEL]
35
+ ```
36
+
37
+ ## Main Results
38
+
39
+ ### Zero-Shot
40
+
41
+ | **Model Size** | **ARC-c** | **ARC-e** | **BoolQ** | **HellaSwag** | **PIQA** | **SciQ** | **WinoGrande** | **Average** |
42
+ |-----------------------------------------------------------------------------|-----------|-----------|-----------|---------------|-----------|-----------|----------------|-------------|
43
+ | [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 26.45 | 45.08 | **53.98** | 46.71 | 69.75 | **84.70** | **53.91** | 54.37 |
44
+ | [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **30.55** | **46.68** | 48.56 | **52.07** | **70.78** | 84.40 | 52.72 | **55.11** |
45
+ | [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 27.56 | 48.06 | 55.78 | 53.97 | 72.31 | 87.20 | 58.01 | 57.56 |
46
+ | [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **30.38** | **50.00** | **60.37** | **59.34** | **72.63** | **88.00** | **58.96** | **59.95** |
47
+ | [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 32.34 | **55.43** | 63.58 | 64.81 | **75.57** | **90.60** | 61.72 | 63.44 |
48
+ | [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **37.97** | 52.23 | **70.00** | **71.20** | 75.03 | 89.30 | **62.75** | **65.50** |
49
+ | [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 35.58 | 59.89 | 67.40 | 72.44 | 78.24 | **92.70** | 65.51 | 67.39 |
50
+ | [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **39.42** | **61.74** | **68.17** | **76.36** | **79.00** | 92.50 | **66.85** | **69.15** |
51
+
52
+ ### LLM360
53
+
54
+ | **Model Size** | **ARC-c** | **HellaSwag** | **MMLU** | **TruthfulQA** | **WinoGrande** | **Average** |
55
+ |-----------------------------------------------------------------------------|-----------|---------------|-----------|----------------|----------------|-------------|
56
+ | [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | 47.15 | 25.72 | **39.24** | **53.83** | 38.72 |
57
+ | [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | **51.58** | **26.70** | 38.72 | 53.20 | **40.54** |
58
+ | [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | 53.86 | **26.01** | 40.18 | 57.22 | 41.50 |
59
+ | [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | **59.31** | 25.41 | **40.48** | **58.33** | **43.41** |
60
+ | [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | 65.71 | **27.05** | 36.98 | 63.22 | 45.93 |
61
+ | [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | **71.83** | 25.65 | **45.95** | **64.72** | **49.94** |
62
+ | [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | 73.28 | **26.76** | 34.98 | 67.25 | 48.90 |
63
+ | [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | **76.87** | 24.80 | **38.76** | **67.96** | **51.22** |
64
+
65
+
66
+ ### OpenLLM Leaderboard
67
+
68
+ | **Model Size** | **ARC-c** | **CrowS-Pairs** | **HellaSwag** | **MMLU** | **PIQA** | **RACE** | **TruthfulQA** | **WinoGrande** | **Average** |
69
+ |-----------------------------------------------------------------------------|-----------|-----------------|---------------|-----------|-----------|-----------|----------------|----------------|-------------|
70
+ | [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | **66.79** | 47.15 | 25.72 | 69.75 | 30.91 | **39.24** | **53.83** | 45.13 |
71
+ | [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | 66.01 | **51.58** | **26.70** | **70.78** | 33.78 | 38.72 | 53.20 | **46.66** |
72
+ | [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | **68.63** | 53.86 | **26.01** | 72.31 | 33.11 | 40.18 | 57.22 | 47.69 |
73
+ | [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | 67.44 | **59.31** | 25.41 | **72.63** | **36.84** | **40.48** | **58.33** | **49.25** |
74
+ | [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | **71.74** | 65.71 | **27.05** | **75.57** | 36.46 | 36.98 | 63.22 | 51.68 |
75
+ | [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | 71.02 | **71.83** | 25.65 | 75.03 | **39.43** | **45.95** | **64.72** | **54.40** |
76
+ | [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | **73.29** | 73.28 | **26.76** | 78.24 | **38.76** | 34.98 | 67.25 | 54.35 |
77
+ | [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | 72.33 | **76.87** | 24.80 | **79.00** | 38.47 | **38.76** | **67.96** | **55.73** |
78
+
79
+ See the technical report for more results and comparison.
80
+
81
+ ## Evaluation
82
+
83
+ ### Setup
84
+
85
+ Install the following dependencies:
86
+
87
+ ```bash
88
+
89
+ # install public lm-eval-harness
90
+
91
+ harness_repo=""public-lm-eval-harness""
92
+ git clone https://github.com/EleutherAI/lm-evaluation-harness ${harness_repo}
93
+ cd ${harness_repo}
94
+ # use main branch on 03-15-2024, SHA is dc90fec
95
+ git checkout dc90fec
96
+ pip install -e .
97
+ cd ..
98
+
99
+ # 66d6242 is the main branch on 2024-04-01
100
+ pip install datasets@git+https://github.com/huggingface/datasets.git@66d6242
101
+ pip install tokenizers>=0.15.2 transformers>=4.38.2 sentencepiece>=0.2.0
102
+
103
+ ```
104
+
105
+ ### Evaluate OpenELM
106
+
107
+ ```bash
108
+
109
+ # OpenELM-3B-Instruct
110
+ hf_model=apple/OpenELM-3B-Instruct
111
+
112
+ # this flag is needed because lm-eval-harness set add_bos_token to False by default, but OpenELM uses LLaMA tokenizer which requires add_bos_token to be True
113
+ tokenizer=meta-llama/Llama-2-7b-hf
114
+ add_bos_token=True
115
+ batch_size=1
116
+
117
+ mkdir lm_eval_output
118
+
119
+ shot=0
120
+ task=arc_challenge,arc_easy,boolq,hellaswag,piqa,race,winogrande,sciq,truthfulqa_mc2
121
+ lm_eval --model hf \
122
+ --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
123
+ --tasks ${task} \
124
+ --device cuda:0 \
125
+ --num_fewshot ${shot} \
126
+ --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
127
+ --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
128
+
129
+ shot=5
130
+ task=mmlu,winogrande
131
+ lm_eval --model hf \
132
+ --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
133
+ --tasks ${task} \
134
+ --device cuda:0 \
135
+ --num_fewshot ${shot} \
136
+ --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
137
+ --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
138
+
139
+ shot=25
140
+ task=arc_challenge,crows_pairs_english
141
+ lm_eval --model hf \
142
+ --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
143
+ --tasks ${task} \
144
+ --device cuda:0 \
145
+ --num_fewshot ${shot} \
146
+ --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
147
+ --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
148
+
149
+ shot=10
150
+ task=hellaswag
151
+ lm_eval --model hf \
152
+ --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
153
+ --tasks ${task} \
154
+ --device cuda:0 \
155
+ --num_fewshot ${shot} \
156
+ --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
157
+ --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
158
+
159
+ ```
160
+
161
+
162
+ ## Bias, Risks, and Limitations
163
+
164
+ The release of OpenELM models aims to empower and enrich the open research community by providing access to state-of-the-art language models. Trained on publicly available datasets, these models are made available without any safety guarantees. Consequently, there exists the possibility of these models producing outputs that are inaccurate, harmful, biased, or objectionable in response to user prompts. Thus, it is imperative for users and developers to undertake thorough safety testing and implement appropriate filtering mechanisms tailored to their specific requirements.
165
+
166
+ ## Citation
167
+
168
+ If you find our work useful, please cite:
169
+
170
+ ```BibTex
171
+ @article{mehtaOpenELMEfficientLanguage2024,
172
+ title = {{OpenELM}: {An} {Efficient} {Language} {Model} {Family} with {Open} {Training} and {Inference} {Framework}},
173
+ shorttitle = {{OpenELM}},
174
+ url = {https://arxiv.org/abs/2404.14619v1},
175
+ language = {en},
176
+ urldate = {2024-04-24},
177
+ journal = {arXiv.org},
178
+ author = {Mehta, Sachin and Sekhavat, Mohammad Hossein and Cao, Qingqing and Horton, Maxwell and Jin, Yanzi and Sun, Chenfan and Mirzadeh, Iman and Najibi, Mahyar and Belenko, Dmitry and Zatloukal, Peter and Rastegari, Mohammad},
179
+ month = apr,
180
+ year = {2024},
181
+ }
182
+
183
+ @inproceedings{mehta2022cvnets,
184
+ author = {Mehta, Sachin and Abdolhosseini, Farzad and Rastegari, Mohammad},
185
+ title = {CVNets: High Performance Library for Computer Vision},
186
+ year = {2022},
187
+ booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
188
+ series = {MM '22}
189
+ }
190
+ ```
191
+ ","{""id"": ""apple/OpenELM-3B-Instruct"", ""author"": ""apple"", ""sha"": ""8288250ae190b81ed7ad515ea661e880b9cde2ba"", ""last_modified"": ""2025-02-28 18:31:32+00:00"", ""created_at"": ""2024-04-12 21:52:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9066, ""downloads_all_time"": null, ""likes"": 332, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""openelm"", ""text-generation"", ""custom_code"", ""arxiv:2404.14619"", ""license:apple-amlr"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apple-amlr\nlicense_name: apple-sample-code-license\nlicense_link: LICENSE"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""OpenELMForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_openelm.OpenELMConfig"", ""AutoModelForCausalLM"": ""modeling_openelm.OpenELMForCausalLM""}, ""model_type"": ""openelm""}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_openelm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generate_openelm.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_openelm.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Norod78/OpenELM_3B_Demo"", ""cmjatom/OpenELM_3B_Demo"", ""KvrParaskevi/OpenELM_3B_Demo"", ""Jawachan/course-rag"", ""JERNGOC/Apple_elm_test""], ""safetensors"": {""parameters"": {""BF16"": 3036647424}, ""total"": 3036647424}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-28 18:31:32+00:00"", ""cardData"": ""license: apple-amlr\nlicense_name: apple-sample-code-license\nlicense_link: LICENSE"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6619ad174988d1bb9a02e7bc"", ""modelId"": ""apple/OpenELM-3B-Instruct"", ""usedStorage"": 6073328184}",0,"https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed, https://huggingface.co/PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed",7,,0,"https://huggingface.co/waltervix/OpenELM-3B-Instruct-Q2_K-GGUF, https://huggingface.co/QuietImpostor/OpenELM-3B-Instruct-GGUFs, https://huggingface.co/waltervix/OpenELM-3B-Instruct-Q4_K_M-GGUF, 
https://huggingface.co/waltervix/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/spongeman/OpenELM-3B-Instruct-Q4_K_M-GGUF, https://huggingface.co/fernandoruiz/OpenELM-3B-Instruct-Q4_K_M-GGUF, https://huggingface.co/fernandoruiz/OpenELM-3B-Instruct-Q4_K_S-GGUF, https://huggingface.co/gibzyn/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/sdalemorrey/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/reach-vb/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/mradermacher/OpenELM-3B-Instruct-GGUF, https://huggingface.co/NikolayKozloff/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/ysn-rfd/OpenELM-3B-Instruct-Q8_0-GGUF, https://huggingface.co/ysn-rfd/OpenELM-3B-Instruct-Q4_0-GGUF, https://huggingface.co/ysn-rfd/OpenELM-3B-Instruct-Q2_K-GGUF, https://huggingface.co/ysn-rfd/OpenELM-3B-Instruct-GGUF",16,,0,"JERNGOC/Apple_elm_test, Jawachan/course-rag, KvrParaskevi/OpenELM_3B_Demo, Norod78/OpenELM_3B_Demo, cmjatom/OpenELM_3B_Demo, huggingface/InferenceSupport/discussions/new?title=apple/OpenELM-3B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bapple%2FOpenELM-3B-Instruct%5D(%2Fapple%2FOpenELM-3B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",6
192
+ PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed,"---
193
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
194
+ base_model: apple/OpenELM-3B-Instruct
195
+ metrics:
196
+ - memory_disk
197
+ - memory_inference
198
+ - inference_latency
199
+ - inference_throughput
200
+ - inference_CO2_emissions
201
+ - inference_energy_consumption
202
+ tags:
203
+ - pruna-ai
204
+ ---
205
+ <!-- header start -->
206
+ <!-- 200823 -->
207
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
208
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
209
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
210
+ </a>
211
+ </div>
212
+ <!-- header end -->
213
+
214
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
215
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
216
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
217
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
218
+
219
+ # Simply make AI models cheaper, smaller, faster, and greener!
220
+
221
+ - Give a thumbs up if you like this model!
222
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
223
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
224
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
225
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
226
+
227
+ ## Results
228
+
229
+ ![image info](./plots.png)
230
+
231
+ **Frequently Asked Questions**
232
+ - ***How does the compression work?*** The model is compressed with hqq.
233
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
234
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
235
+ - ***What is the model format?*** We use safetensors.
236
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
237
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
238
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
239
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
240
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
241
+
242
+ ## Setup
243
+
244
+ You can run the smashed model with these steps:
245
+
246
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
247
+ 1. Make sure that you have installed quantization related packages.
248
+ ```bash
249
+ pip install hqq
250
+ ```
251
+ 2. Load & run the model.
252
+ ```python
253
+ from transformers import AutoModelForCausalLM, AutoTokenizer
254
+ from hqq.engine.hf import HQQModelForCausalLM
255
+ from hqq.models.hf.base import AutoHQQHFModel
256
+
257
+ try:
258
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed"", device_map='auto')
259
+ except:
260
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed"")
261
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
262
+
263
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
264
+
265
+ outputs = model.generate(input_ids, max_new_tokens=216)
266
+ tokenizer.decode(outputs[0])
267
+ ```
268
+
269
+ ## Configurations
270
+
271
+ The configuration info are in `smash_config.json`.
272
+
273
+ ## Credits & License
274
+
275
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
276
+
277
+ ## Want to compress other models?
278
+
279
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
280
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""52a3b30e3b88b2200378250cf6ac35a7b831c4d3"", ""last_modified"": ""2024-07-12 20:21:32+00:00"", ""created_at"": ""2024-07-12 20:21:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""openelm"", ""text-generation"", ""pruna-ai"", ""custom_code"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""OpenELMForCausalLM""], ""auto_map"": {""AutoConfig"": ""apple/OpenELM-3B-Instruct--configuration_openelm.OpenELMConfig"", ""AutoModelForCausalLM"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM""}, ""model_type"": ""openelm"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": 
{""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-12 20:21:32+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66919033124a23efb4702646"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed"", ""usedStorage"": 
633975123}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-1bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
281
+ PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed,"---
282
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
283
+ base_model: apple/OpenELM-3B-Instruct
284
+ metrics:
285
+ - memory_disk
286
+ - memory_inference
287
+ - inference_latency
288
+ - inference_throughput
289
+ - inference_CO2_emissions
290
+ - inference_energy_consumption
291
+ tags:
292
+ - pruna-ai
293
+ ---
294
+ <!-- header start -->
295
+ <!-- 200823 -->
296
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
297
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
298
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
299
+ </a>
300
+ </div>
301
+ <!-- header end -->
302
+
303
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
304
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
305
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
306
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
307
+
308
+ # Simply make AI models cheaper, smaller, faster, and greener!
309
+
310
+ - Give a thumbs up if you like this model!
311
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
312
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
313
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
314
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
315
+
316
+ ## Results
317
+
318
+ ![image info](./plots.png)
319
+
320
+ **Frequently Asked Questions**
321
+ - ***How does the compression work?*** The model is compressed with hqq.
322
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
323
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
324
+ - ***What is the model format?*** We use safetensors.
325
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
326
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
327
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
328
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
329
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
330
+
331
+ ## Setup
332
+
333
+ You can run the smashed model with these steps:
334
+
335
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
336
+ 1. Make sure that you have installed quantization related packages.
337
+ ```bash
338
+ pip install hqq
339
+ ```
340
+ 2. Load & run the model.
341
+ ```python
342
+ from transformers import AutoModelForCausalLM, AutoTokenizer
343
+ from hqq.engine.hf import HQQModelForCausalLM
344
+ from hqq.models.hf.base import AutoHQQHFModel
345
+
346
+ try:
347
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed"", device_map='auto')
348
+ except:
349
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed"")
350
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
351
+
352
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
353
+
354
+ outputs = model.generate(input_ids, max_new_tokens=216)
355
+ tokenizer.decode(outputs[0])
356
+ ```
357
+
358
+ ## Configurations
359
+
360
+ The configuration info are in `smash_config.json`.
361
+
362
+ ## Credits & License
363
+
364
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
365
+
366
+ ## Want to compress other models?
367
+
368
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
369
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""82537ac0859ef9d531c185094fa84252a30c474b"", ""last_modified"": ""2024-07-12 20:23:17+00:00"", ""created_at"": ""2024-07-12 20:22:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""openelm"", ""text-generation"", ""pruna-ai"", ""custom_code"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""OpenELMForCausalLM""], ""auto_map"": {""AutoConfig"": ""apple/OpenELM-3B-Instruct--configuration_openelm.OpenELMConfig"", ""AutoModelForCausalLM"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM""}, ""model_type"": ""openelm"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": 
{""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-12 20:23:17+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""669190923ebf20809d70a5ae"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed"", ""usedStorage"": 
1001238867}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-2bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
370
+ PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed,"---
371
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
372
+ base_model: apple/OpenELM-3B-Instruct
373
+ metrics:
374
+ - memory_disk
375
+ - memory_inference
376
+ - inference_latency
377
+ - inference_throughput
378
+ - inference_CO2_emissions
379
+ - inference_energy_consumption
380
+ tags:
381
+ - pruna-ai
382
+ ---
383
+ <!-- header start -->
384
+ <!-- 200823 -->
385
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
386
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
387
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
388
+ </a>
389
+ </div>
390
+ <!-- header end -->
391
+
392
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
393
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
394
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
395
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
396
+
397
+ # Simply make AI models cheaper, smaller, faster, and greener!
398
+
399
+ - Give a thumbs up if you like this model!
400
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
401
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
402
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
403
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
404
+
405
+ ## Results
406
+
407
+ ![image info](./plots.png)
408
+
409
+ **Frequently Asked Questions**
410
+ - ***How does the compression work?*** The model is compressed with hqq.
411
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
412
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
413
+ - ***What is the model format?*** We use safetensors.
414
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
415
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
416
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
417
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
418
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
419
+
420
+ ## Setup
421
+
422
+ You can run the smashed model with these steps:
423
+
424
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
425
+ 1. Make sure that you have installed quantization related packages.
426
+ ```bash
427
+ pip install hqq
428
+ ```
429
+ 2. Load & run the model.
430
+ ```python
431
+ from transformers import AutoModelForCausalLM, AutoTokenizer
432
+ from hqq.engine.hf import HQQModelForCausalLM
433
+ from hqq.models.hf.base import AutoHQQHFModel
434
+
435
+ try:
436
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed"", device_map='auto')
437
+ except:
438
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed"")
439
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
440
+
441
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
442
+
443
+ outputs = model.generate(input_ids, max_new_tokens=216)
444
+ tokenizer.decode(outputs[0])
445
+ ```
446
+
447
+ ## Configurations
448
+
449
+ The configuration info are in `smash_config.json`.
450
+
451
+ ## Credits & License
452
+
453
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
454
+
455
+ ## Want to compress other models?
456
+
457
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
458
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""a4cfb5c390a404369286c59b3a2940f7622b65ca"", ""last_modified"": ""2024-07-12 20:24:03+00:00"", ""created_at"": ""2024-07-12 20:23:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""openelm"", ""text-generation"", ""pruna-ai"", ""custom_code"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""OpenELMForCausalLM""], ""auto_map"": {""AutoConfig"": ""apple/OpenELM-3B-Instruct--configuration_openelm.OpenELMConfig"", ""AutoModelForCausalLM"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM""}, ""model_type"": ""openelm"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": 
{""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-12 20:24:03+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""apple/OpenELM-3B-Instruct--modeling_openelm.OpenELMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""669190a833cc3623622bb362"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed"", ""usedStorage"": 
1735766355}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-HQQ-4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-4bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-HQQ-4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
459
+ PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed,"---
460
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
461
+ base_model: apple/OpenELM-3B-Instruct
462
+ metrics:
463
+ - memory_disk
464
+ - memory_inference
465
+ - inference_latency
466
+ - inference_throughput
467
+ - inference_CO2_emissions
468
+ - inference_energy_consumption
469
+ tags:
470
+ - pruna-ai
471
+ ---
472
+ <!-- header start -->
473
+ <!-- 200823 -->
474
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
475
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
476
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
477
+ </a>
478
+ </div>
479
+ <!-- header end -->
480
+
481
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
482
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
483
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
484
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
485
+
486
+ # Simply make AI models cheaper, smaller, faster, and greener!
487
+
488
+ - Give a thumbs up if you like this model!
489
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
490
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
491
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
492
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
493
+
494
+ ## Results
495
+
496
+ ![image info](./plots.png)
497
+
498
+ **Frequently Asked Questions**
499
+ - ***How does the compression work?*** The model is compressed with quanto.
500
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
501
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
502
+ - ***What is the model format?*** We use safetensors.
503
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
504
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
505
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
506
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
507
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
508
+
509
+ ## Setup
510
+
511
+ You can run the smashed model with these steps:
512
+
513
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
514
+ 1. Make sure that you have installed quantization related packages.
515
+ ```bash
516
+ pip install quanto
517
+ ```
518
+ 2. Load & run the model.
519
+ ```python
520
+ from transformers import AutoModelForCausalLM, AutoTokenizer
521
+ IMPORTS
522
+
523
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed"", trust_remote_code=True, device_map='auto')
524
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
525
+
526
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
527
+
528
+ outputs = model.generate(input_ids, max_new_tokens=216)
529
+ tokenizer.decode(outputs[0])
530
+ ```
531
+
532
+ ## Configurations
533
+
534
+ The configuration info are in `smash_config.json`.
535
+
536
+ ## Credits & License
537
+
538
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
539
+
540
+ ## Want to compress other models?
541
+
542
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
543
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""ab413b65408c31a23e9c89aa0921bd3ac6ba5736"", ""last_modified"": ""2024-07-19 09:23:28+00:00"", ""created_at"": ""2024-07-12 20:46:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:23:28+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6691961f21aeef32fb1f0ff8"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed"", ""usedStorage"": 6229294661}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
544
+ PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed,"---
545
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
546
+ base_model: apple/OpenELM-3B-Instruct
547
+ metrics:
548
+ - memory_disk
549
+ - memory_inference
550
+ - inference_latency
551
+ - inference_throughput
552
+ - inference_CO2_emissions
553
+ - inference_energy_consumption
554
+ tags:
555
+ - pruna-ai
556
+ ---
557
+ <!-- header start -->
558
+ <!-- 200823 -->
559
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
560
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
561
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
562
+ </a>
563
+ </div>
564
+ <!-- header end -->
565
+
566
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
567
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
568
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
569
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
570
+
571
+ # Simply make AI models cheaper, smaller, faster, and greener!
572
+
573
+ - Give a thumbs up if you like this model!
574
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
575
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
576
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
577
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
578
+
579
+ ## Results
580
+
581
+ ![image info](./plots.png)
582
+
583
+ **Frequently Asked Questions**
584
+ - ***How does the compression work?*** The model is compressed with quanto.
585
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
586
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
587
+ - ***What is the model format?*** We use safetensors.
588
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
589
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
590
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
591
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
592
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
593
+
594
+ ## Setup
595
+
596
+ You can run the smashed model with these steps:
597
+
598
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
599
+ 1. Make sure that you have installed quantization related packages.
600
+ ```bash
601
+ pip install quanto
602
+ ```
603
+ 2. Load & run the model.
604
+ ```python
605
+ from transformers import AutoModelForCausalLM, AutoTokenizer
606
+ IMPORTS
607
+
608
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed"", trust_remote_code=True, device_map='auto')
609
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
610
+
611
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
612
+
613
+ outputs = model.generate(input_ids, max_new_tokens=216)
614
+ tokenizer.decode(outputs[0])
615
+ ```
616
+
617
+ ## Configurations
618
+
619
+ The configuration info are in `smash_config.json`.
620
+
621
+ ## Credits & License
622
+
623
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
624
+
625
+ ## Want to compress other models?
626
+
627
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
628
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""4b1ed848fee07a25e62c7b6379f7b0c6c46f90ac"", ""last_modified"": ""2024-07-19 09:20:47+00:00"", ""created_at"": ""2024-07-12 20:46:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:20:47+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6691961f7db12e76d5797bc8"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed"", ""usedStorage"": 6229294661}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
629
+ PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed,"---
630
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
631
+ base_model: apple/OpenELM-3B-Instruct
632
+ metrics:
633
+ - memory_disk
634
+ - memory_inference
635
+ - inference_latency
636
+ - inference_throughput
637
+ - inference_CO2_emissions
638
+ - inference_energy_consumption
639
+ tags:
640
+ - pruna-ai
641
+ ---
642
+ <!-- header start -->
643
+ <!-- 200823 -->
644
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
645
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
646
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
647
+ </a>
648
+ </div>
649
+ <!-- header end -->
650
+
651
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
652
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
653
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
654
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
655
+
656
+ # Simply make AI models cheaper, smaller, faster, and greener!
657
+
658
+ - Give a thumbs up if you like this model!
659
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
660
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
661
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
662
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
663
+
664
+ ## Results
665
+
666
+ ![image info](./plots.png)
667
+
668
+ **Frequently Asked Questions**
669
+ - ***How does the compression work?*** The model is compressed with quanto.
670
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
671
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
672
+ - ***What is the model format?*** We use safetensors.
673
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
674
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
675
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
676
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
677
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
678
+
679
+ ## Setup
680
+
681
+ You can run the smashed model with these steps:
682
+
683
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
684
+ 1. Make sure that you have installed quantization related packages.
685
+ ```bash
686
+ pip install quanto
687
+ ```
688
+ 2. Load & run the model.
689
+ ```python
690
+ from transformers import AutoModelForCausalLM, AutoTokenizer
691
+ IMPORTS
692
+
693
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed"", trust_remote_code=True, device_map='auto')
694
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
695
+
696
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
697
+
698
+ outputs = model.generate(input_ids, max_new_tokens=216)
699
+ tokenizer.decode(outputs[0])
700
+ ```
701
+
702
+ ## Configurations
703
+
704
+ The configuration info are in `smash_config.json`.
705
+
706
+ ## Credits & License
707
+
708
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
709
+
710
+ ## Want to compress other models?
711
+
712
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
713
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""56b1f417cbfdca38d95380fc81ae0d8b306f4b75"", ""last_modified"": ""2024-07-19 09:23:32+00:00"", ""created_at"": ""2024-07-12 20:46:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:23:32+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6691962e306a261f184c6941"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed"", ""usedStorage"": 6229294533}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-int8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
714
+ PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed,"---
715
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
716
+ base_model: apple/OpenELM-3B-Instruct
717
+ metrics:
718
+ - memory_disk
719
+ - memory_inference
720
+ - inference_latency
721
+ - inference_throughput
722
+ - inference_CO2_emissions
723
+ - inference_energy_consumption
724
+ tags:
725
+ - pruna-ai
726
+ ---
727
+ <!-- header start -->
728
+ <!-- 200823 -->
729
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
730
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
731
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
732
+ </a>
733
+ </div>
734
+ <!-- header end -->
735
+
736
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
737
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
738
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
739
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
740
+
741
+ # Simply make AI models cheaper, smaller, faster, and greener!
742
+
743
+ - Give a thumbs up if you like this model!
744
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
745
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
746
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
747
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
748
+
749
+ ## Results
750
+
751
+ ![image info](./plots.png)
752
+
753
+ **Frequently Asked Questions**
754
+ - ***How does the compression work?*** The model is compressed with quanto.
755
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
756
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
757
+ - ***What is the model format?*** We use safetensors.
758
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
759
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
760
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
761
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
762
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
763
+
764
+ ## Setup
765
+
766
+ You can run the smashed model with these steps:
767
+
768
+ 0. Check requirements from the original repo apple/OpenELM-3B-Instruct installed. In particular, check python, cuda, and transformers versions.
769
+ 1. Make sure that you have installed quantization related packages.
770
+ ```bash
771
+ pip install quanto
772
+ ```
773
+ 2. Load & run the model.
774
+ ```python
775
+ from transformers import AutoModelForCausalLM, AutoTokenizer
776
+ IMPORTS
777
+
778
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed"", trust_remote_code=True, device_map='auto')
779
+ tokenizer = AutoTokenizer.from_pretrained(""apple/OpenELM-3B-Instruct"")
780
+
781
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
782
+
783
+ outputs = model.generate(input_ids, max_new_tokens=216)
784
+ tokenizer.decode(outputs[0])
785
+ ```
786
+
787
+ ## Configurations
788
+
789
+ The configuration info are in `smash_config.json`.
790
+
791
+ ## Credits & License
792
+
793
+ The license of the smashed model follows the license of the original model. Please check the license of the original model apple/OpenELM-3B-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
794
+
795
+ ## Want to compress other models?
796
+
797
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
798
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""c1a5fe75e545dc0b9096920e26ddefbb00de364e"", ""last_modified"": ""2024-07-19 09:22:04+00:00"", ""created_at"": ""2024-07-12 20:52:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:apple/OpenELM-3B-Instruct"", ""base_model:finetune:apple/OpenELM-3B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:22:04+00:00"", ""cardData"": ""base_model: apple/OpenELM-3B-Instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6691977163fbb2098b49d38c"", ""modelId"": ""PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed"", ""usedStorage"": 6229294533}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/apple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed%5D(%2FPrunaAI%2Fapple-OpenELM-3B-Instruct-QUANTO-float8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Phi-4-multimodal-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff
 
Phind-CodeLlama-34B-Python-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Phind/Phind-CodeLlama-34B-Python-v1,"---
3
+ license: llama2
4
+ model-index:
5
+ - name: Phind-CodeLlama-34B-v1
6
+ results:
7
+ - task:
8
+ type: text-generation
9
+ dataset:
10
+ type: openai_humaneval
11
+ name: HumanEval
12
+ metrics:
13
+ - name: pass@1
14
+ type: pass@1
15
+ value: 69.5%
16
+ verified: false
17
+ tags:
18
+ - code llama
19
+ ---
20
+
21
+ # **Phind-CodeLlama-34B-Python-v1**
22
+ We've fine-tuned CodeLlama-34B and CodeLlama-34B-Python on an internal Phind dataset that achieve 67.6% and 69.5% pass@1 on HumanEval, respectively. GPT-4 achieves 67%. We've applied OpenAI's decontamination methodology to our dataset to ensure result validity.
23
+
24
+ More details can be found on our [blog post](https://www.phind.com/blog/code-llama-beats-gpt4).
25
+
26
+ ## Model Details
27
+ This model is fine-tuned from CodeLlama-34B-Python and achieves 69.5% pass@1 on HumanEval.
28
+
29
+ ## Dataset Details
30
+ We fined-tuned on a proprietary dataset of ~80k high quality programming problems and solutions. This dataset consists of instruction-answer pairs instead of code completion examples, making it structurally different from HumanEval. The Phind models were trained for 2 epochs, for a total of ~160k examples shown. LoRA was not used -- both models are a native finetune. We used DeepSpeed ZeRO 3 and Flash Attention 2 to train these models in three hours on 32 A100-80GB GPUs. We used a sequence length of 4096 tokens.
31
+
32
+ ## How to Get Started with the Model
33
+
34
+ Make sure to install Transformers from the main git branch:
35
+
36
+ ```bash
37
+ pip install git+https://github.com/huggingface/transformers.git
38
+ ```
39
+
40
+ ## How to Prompt the Model
41
+ **Please note that this model is somewhat instruction-tuned, but not chat-tuned.**
42
+
43
+ Do not try to use the Llama chat markup with this model. Instead, simply tell it what you want and add ""\n: "" at the end of your task.
44
+
45
+ For example:
46
+
47
+ ```
48
+ Write me a linked list implementation: \n
49
+ ```
50
+
51
+ ## How to reproduce HumanEval Results
52
+
53
+ To reproduce our results:
54
+
55
+ ```python
56
+
57
+ from transformers import AutoTokenizer, LlamaForCausalLM
58
+ from human_eval.data import write_jsonl, read_problems
59
+ from tqdm import tqdm
60
+
61
+ # initialize the model
62
+
63
+ model_path = ""Phind/Phind-CodeLlama-34B-v1""
64
+ model = LlamaForCausalLM.from_pretrained(model_path, device_map=""auto"")
65
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
66
+
67
+ # HumanEval helper
68
+
69
+ def generate_one_completion(prompt: str):
70
+ tokenizer.pad_token = tokenizer.eos_token
71
+ inputs = tokenizer(prompt, return_tensors=""pt"", truncation=True, max_length=4096)
72
+
73
+ # Generate
74
+ generate_ids = model.generate(inputs.input_ids.to(""cuda""), max_new_tokens=256, do_sample=True, top_p=0.75, top_k=40, temperature=0.1)
75
+ completion = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
76
+ completion = completion.replace(prompt, """").split(""\n\n\n"")[0]
77
+
78
+ return completion
79
+
80
+ # perform HumanEval
81
+ problems = read_problems()
82
+
83
+ num_samples_per_task = 1
84
+ samples = [
85
+ dict(task_id=task_id, completion=generate_one_completion(problems[task_id][""prompt""]))
86
+ for task_id in tqdm(problems)
87
+ for _ in range(num_samples_per_task)
88
+ ]
89
+ write_jsonl(""samples.jsonl"", samples)
90
+
91
+ # run `evaluate_functional_correctness samples.jsonl` in your HumanEval code sandbox
92
+ ```
93
+
94
+ ## Bias, Risks, and Limitations
95
+
96
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
97
+ This model has undergone very limited testing. Additional safety testing should be performed before any real-world deployments.
98
+
99
+
100
+ ## Training details
101
+
102
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
103
+
104
+ - **Hardware Type:** 32x A100-80GB
105
+ - **Hours used:** 90 GPU-hours
106
+ - **Cloud Provider:** AWS
107
+ - **Compute Region:** us-east-1","{""id"": ""Phind/Phind-CodeLlama-34B-Python-v1"", ""author"": ""Phind"", ""sha"": ""b6c254180307b532b3e9abce45c8fecf5b986e73"", ""last_modified"": ""2023-08-26 03:05:48+00:00"", ""created_at"": ""2023-08-25 20:33:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 297, ""downloads_all_time"": null, ""likes"": 252, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""code llama"", ""license:llama2"", ""model-index"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: llama2\ntags:\n- code llama\nmodel-index:\n- name: Phind-CodeLlama-34B-v1\n results:\n - task:\n type: text-generation\n dataset:\n name: HumanEval\n type: openai_humaneval\n metrics:\n - type: pass@1\n value: 69.5%\n name: pass@1\n verified: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""Phind-CodeLlama-34B-v1"", ""results"": [{""task"": {""type"": ""text-generation""}, ""dataset"": {""type"": ""openai_humaneval"", ""name"": ""HumanEval""}, ""metrics"": [{""name"": ""pass@1"", ""type"": ""pass@1"", ""value"": ""69.5%"", ""verified"": false}]}]}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, 
""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", 
""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""neubla/neubla-llm-evaluation-board"", ""SoUmNerd/Phind-Phind-CodeLlama-34B-Python-v1"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard"", ""AdenHelpful/Phind-Phind-CodeLlama-34B-Python-v1"", ""MoAlsalman/Phind-Phind-CodeLlama-34B-Python-v1"", ""amit1300/Phind-Phind-CodeLlama-34B-Python-v1"", ""leduy09Mchine/Code_Bot"", ""leduy09Mchine/CODE_GR"", ""neeraw/Phind-Phind-CodeLlama-34B-Python-v1"", ""neeraw/Phind-Phind-CodeLlama-34B-Python-new"", ""paranjay-bd/PCLPy-34B-v1"", ""JD2020/Phind-Phind-CodeLlama-34B-Python-v1"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""Asiya057/Incarna-Mind"", ""Asiya057/Incarna-Mind-POC"", ""mjalg/IFEvalTR""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-26 03:05:48+00:00"", ""cardData"": ""license: llama2\ntags:\n- code llama\nmodel-index:\n- name: Phind-CodeLlama-34B-v1\n results:\n - task:\n type: text-generation\n dataset:\n name: HumanEval\n type: openai_humaneval\n metrics:\n - type: pass@1\n value: 69.5%\n name: pass@1\n verified: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64e910055b8d8156f29692ec"", ""modelId"": ""Phind/Phind-CodeLlama-34B-Python-v1"", ""usedStorage"": 134976616088}",0,,0,,0,"https://huggingface.co/TheBloke/Phind-CodeLlama-34B-Python-v1-GGUF, https://huggingface.co/TheBloke/Phind-CodeLlama-34B-Python-v1-GPTQ, https://huggingface.co/TheBloke/Phind-CodeLlama-34B-Python-v1-AWQ, 
https://huggingface.co/mradermacher/Phind-CodeLlama-34B-Python-v1-GGUF, https://huggingface.co/mradermacher/Phind-CodeLlama-34B-Python-v1-i1-GGUF",5,"https://huggingface.co/Solshine/Tlamatini-Papalotl-Wisdom-Butterfly-CodeLlama-v0-1, https://huggingface.co/Solshine/Tlamatini-Papalotl-Wisdom-Butterfly-CodeLlama-v0-2",2,"Asiya057/Incarna-Mind, BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, SoUmNerd/Phind-Phind-CodeLlama-34B-Python-v1, Vikhrmodels/small-shlepa-lb, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=Phind/Phind-CodeLlama-34B-Python-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPhind%2FPhind-CodeLlama-34B-Python-v1%5D(%2FPhind%2FPhind-CodeLlama-34B-Python-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, neubla/neubla-llm-evaluation-board",13
Qwen-7B-Chat_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,749 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Qwen/Qwen-7B-Chat,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - qwen
8
+ pipeline_tag: text-generation
9
+ inference: false
10
+ license: other
11
+ license_name: tongyi-qianwen-license-agreement
12
+ license_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
13
+ ---
14
+
15
+ # Qwen-7B-Chat
16
+
17
+ <p align=""center"">
18
+ <img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
19
+ <p>
20
+ <br>
21
+
22
+ <p align=""center"">
23
+ 🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary"">Demo</a>
24
+ <br>
25
+ <a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href=""https://dashscope.aliyun.com"">API</a>
26
+ </p>
27
+ <br>
28
+
29
+
30
+ ## 介绍(Introduction)
31
+
32
+ **通义千问-7B(Qwen-7B)**是阿里云研发的通义千问大模型系列的70亿参数规模的模型。Qwen-7B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-7B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-7B-Chat。相较于最初开源的Qwen-7B模型,我们现已将预训练模型和Chat模型更新到效果更优的版本。本仓库为Qwen-7B-Chat的仓库。
33
+
34
+ 如果您想了解更多关于通义千问-7B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
35
+
36
+ **Qwen-7B** is the 7B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-7B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-7B, we release Qwen-7B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. Now we have updated both our pretrained and chat models with better performances. This repository is the one for Qwen-7B-Chat.
37
+
38
+ For more details about Qwen, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
39
+ <br>
40
+
41
+ ## 要求(Requirements)
42
+
43
+ * python 3.8及以上版本
44
+ * pytorch 1.12及以上版本,推荐2.0及以上版本
45
+ * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
46
+ * python 3.8 and above
47
+ * pytorch 1.12 and above, 2.0 and above are recommended
48
+ * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
49
+ <br>
50
+
51
+ ## 依赖项(Dependency)
52
+
53
+ 运行Qwen-7B-Chat,请确保满足上述要求,再执行以下pip命令安装依赖库
54
+
55
+ To run Qwen-7B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
56
+
57
+ ```bash
58
+ pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
59
+ ```
60
+
61
+ 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
62
+
63
+ In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
64
+
65
+ ```bash
66
+ git clone https://github.com/Dao-AILab/flash-attention
67
+ cd flash-attention && pip install .
68
+ # 下方安装可选,安装可能比较缓慢。
69
+ # pip install csrc/layer_norm
70
+ # pip install csrc/rotary
71
+ ```
72
+ <br>
73
+
74
+ ## 快速使用(Quickstart)
75
+
76
+ 下面我们展示了一个使用Qwen-7B-Chat模型,进行多轮对话交互的样例:
77
+
78
+ We show an example of multi-turn interaction with Qwen-7B-Chat in the following code:
79
+
80
+ ```python
81
+ from transformers import AutoModelForCausalLM, AutoTokenizer
82
+ from transformers.generation import GenerationConfig
83
+
84
+ # Note: The default behavior now has injection attack prevention off.
85
+ tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-7B-Chat"", trust_remote_code=True)
86
+
87
+ # use bf16
88
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B-Chat"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
89
+ # use fp16
90
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B-Chat"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
91
+ # use cpu only
92
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B-Chat"", device_map=""cpu"", trust_remote_code=True).eval()
93
+ # use auto mode, automatically select precision based on the device.
94
+ model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B-Chat"", device_map=""auto"", trust_remote_code=True).eval()
95
+
96
+ # Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
97
+ # model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-7B-Chat"", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
98
+
99
+ # 第一轮对话 1st dialogue turn
100
+ response, history = model.chat(tokenizer, ""你好"", history=None)
101
+ print(response)
102
+ # 你好!很高兴为你提供帮助。
103
+
104
+ # 第二轮对话 2nd dialogue turn
105
+ response, history = model.chat(tokenizer, ""给我讲一个年轻人奋斗创业最终取得成功的故事。"", history=history)
106
+ print(response)
107
+ # 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
108
+ # 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。
109
+ # 为了实现这个目标,李明勤奋学习,考上了大学。在大学期间,他积极参加各种创业比赛,获得了不少奖项。他还利用课余时间去实习,积累了宝贵的经验。
110
+ # 毕业后,李明决定开始自己的创业之路。他开始寻找投资机会,但多次都被拒绝了。然而,他并没有放弃。他继续努力,不断改进自己的创业计划,并寻找新的投资机会。
111
+ # 最终,李明成功地获得了一笔投资,开始了自己的创业之路。他成立了一家科技公司,专注于开发新型软件。在他的领导下,公司迅速发展起来,成为了一家成功的科技企业。
112
+ # 李明的成功并不是偶然的。他勤奋、坚韧、勇于冒险,不断学习和改进自己。他的成功也证明了,只要努力奋斗,任何人都有可能取得成功。
113
+
114
+ # 第三轮对话 3rd dialogue turn
115
+ response, history = model.chat(tokenizer, ""给这个故事起一个标题"", history=history)
116
+ print(response)
117
+ # 《奋斗创业:一个年轻人的成功之路》
118
+ ```
119
+
120
+ 关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
121
+
122
+ For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information.
123
+ <br>
124
+
125
+ ## Tokenizer
126
+
127
+ > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。
128
+
129
+ 基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。
130
+
131
+ Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md).
132
+ <br>
133
+
134
+ ## 量化 (Quantization)
135
+
136
+ ### 用法 (Usage)
137
+
138
+ **请注意:我们更新量化方案为基于[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)的量化,提供Qwen-7B-Chat的Int4量化模型[点击这里](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4)。相比此前方案,该方案在模型评测效果几乎无损,且存储需求更低,推理速度更优。**
139
+
140
+ **Note: we provide a new solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release an Int4 quantized model for Qwen-7B-Chat [Click here](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4), which achieves nearly lossless model effects but improved performance on both memory costs and inference speed, in comparison with the previous solution.**
141
+
142
+ 以下我们提供示例说明如何使用Int4量化模型。在开始使用前,请先保证满足要求(如torch 2.0及以上,transformers版本为4.32.0及以上,等等),并安装所需安装包:
143
+
144
+ Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:
145
+
146
+ ```bash
147
+ pip install auto-gptq optimum
148
+ ```
149
+
150
+ 如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。
151
+
152
+ 随后即可使用和上述一致的用法调用量化模型:
153
+
154
+ If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a pre-build wheel.
155
+
156
+ Then you can load the quantized model easily and run inference as same as usual:
157
+
158
+ ```python
159
+ model = AutoModelForCausalLM.from_pretrained(
160
+ ""Qwen/Qwen-7B-Chat-Int4"",
161
+ device_map=""auto"",
162
+ trust_remote_code=True
163
+ ).eval()
164
+ response, history = model.chat(tokenizer, ""你好"", history=None)
165
+ ```
166
+
167
+
168
+
169
+ ### 效果评测
170
+
171
+ 我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),发现量化模型效果损失较小,结果如下所示:
172
+
173
+ We illustrate the zero-shot performance of both BF16, Int8 and Int4 models on the benchmark, and we find that the quantized model does not suffer from significant performance degradation. Results are shown below:
174
+
175
+ | Quantization | MMLU | CEval (val) | GSM8K | Humaneval |
176
+ | ------------- | :--------: | :----------: | :----: | :--------: |
177
+ | BF16 | 55.8 | 59.7 | 50.3 | 37.2 |
178
+ | Int8 | 55.4 | 59.4 | 48.3 | 34.8 |
179
+ | Int4 | 55.1 | 59.2 | 49.7 | 29.9 |
180
+
181
+ ### 推理速度 (Inference Speed)
182
+
183
+ 我们测算了不同精度模型以及不同FlashAttn库版本下模型生成2048和8192个token的平均推理速度。如图所示:
184
+
185
+ We measured the average inference speed of generating 2048 and 8192 tokens with different quantization levels and versions of flash-attention, respectively.
186
+
187
+ | Quantization | FlashAttn | Speed (2048 tokens) | Speed (8192 tokens) |
188
+ | ------------- | :-------: | :------------------:| :------------------:|
189
+ | BF16 | v2 | 40.93 | 36.14 |
190
+ | Int8 | v2 | 37.47 | 32.54 |
191
+ | Int4 | v2 | 50.09 | 38.61 |
192
+ | BF16 | v1 | 40.75 | 35.34 |
193
+ | Int8 | v1 | 37.51 | 32.39 |
194
+ | Int4 | v1 | 45.98 | 36.47 |
195
+ | BF16 | Disabled | 37.55 | 33.56 |
196
+ | Int8 | Disabled | 37.84 | 32.65 |
197
+ | Int4 | Disabled | 48.12 | 36.70 |
198
+
199
+ 具体而言,我们记录在长度为1的上下文的条件下生成8192个token的性能。评测运行于单张A100-SXM4-80G GPU,使用PyTorch 2.0.1和CUDA 11.8。推理速度是生成8192个token的速度均值。
200
+
201
+ In detail, the setting of profiling is generating 8192 new tokens with 1 context token. The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.8. The inference speed is averaged over the generated 8192 tokens.
202
+
203
+ 注意:以上Int4/Int8模型生成速度使用autogptq库给出,当前``AutoModelForCausalLM.from_pretrained``载入的模型生成速度会慢大约20%。我们已经将该问题汇报给HuggingFace团队,若有解决方案将即时更新。
204
+
205
+ Note: The generation speed of the Int4/Int8 models mentioned above is provided by the autogptq library. The current speed of the model loaded using ""AutoModelForCausalLM.from_pretrained"" will be approximately 20% slower. We have reported this issue to the HuggingFace team and will update it promptly if a solution is available.
206
+
207
+ ### 显存使用 (GPU Memory Usage)
208
+
209
+ 我们还测算了不同模型精度编码2048个token及生成8192个token的峰值显存占用情况。(显存消耗在是否使用FlashAttn的情况下均类似。)结果如下所示:
210
+
211
+ We also profile the peak GPU memory usage for encoding 2048 tokens as context (and generating single token) and generating 8192 tokens (with single token as context) under different quantization levels, respectively. (The GPU memory usage is similar when using flash-attention or not.)The results are shown below.
212
+
213
+ | Quantization Level | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens |
214
+ | ------------------ | :---------------------------------: | :-----------------------------------: |
215
+ | BF16 | 16.99GB | 22.53GB |
216
+ | Int8 | 11.20GB | 16.62GB |
217
+ | Int4 | 8.21GB | 13.63GB |
218
+
219
+ 上述性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。
220
+
221
+ The above speed and memory profiling are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py).
222
+ <br>
223
+
224
+ ## 模型细节(Model)
225
+
226
+ 与Qwen-7B预训练模型相同,Qwen-7B-Chat模型规模基本情况如下所示:
227
+
228
+ The details of the model architecture of Qwen-7B-Chat are listed as follows:
229
+
230
+ | Hyperparameter | Value |
231
+ |:----------------|:------:|
232
+ | n_layers | 32 |
233
+ | n_heads | 32 |
234
+ | d_model | 4096 |
235
+ | vocab size | 151851 |
236
+ | sequence length | 8192 |
237
+
238
+ 在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
239
+ 即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
240
+
241
+ 在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-7B-Chat使用了约15万token大小的词表。
242
+ 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分��种进行能力增强。
243
+ 词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
244
+
245
+ For position encoding, FFN activation function, and normalization calculation methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
246
+
247
+ For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-7B-Chat uses a vocabulary of over 150K tokens.
248
+ It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary.
249
+ It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
250
+ <br>
251
+
252
+ ## 评测效果(Evaluation)
253
+
254
+ 对于Qwen-7B-Chat模型,我们同样评测了常规的中文理解(C-Eval)、英文理解(MMLU)、代码(HumanEval)和数学(GSM8K)等权威任务,同时包含了长序列任务的评测结果。由于Qwen-7B-Chat模型经过对齐后,激发了较强的外部系统调用能力,我们还进行了工具使用能力方面的评测。
255
+
256
+ 提示:由于硬件和框架造成的舍入误差,复现结果如有波动属于正常现象。
257
+
258
+ For Qwen-7B-Chat, we also evaluate the model on C-Eval, MMLU, HumanEval, GSM8K, etc., as well as the benchmark evaluation for long-context understanding, and tool usage.
259
+
260
+ Note: Due to rounding errors caused by hardware and framework, differences in reproduced results are possible.
261
+
262
+ ### 中文评测(Chinese Evaluation)
263
+
264
+ #### C-Eval
265
+
266
+ 在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-7B-Chat模型的0-shot & 5-shot准确率
267
+
268
+ We demonstrate the 0-shot & 5-shot accuracy of Qwen-7B-Chat on C-Eval validation set
269
+
270
+ | Model | Avg. Acc. |
271
+ |:--------------------------------:|:---------:|
272
+ | LLaMA2-7B-Chat | 31.9 |
273
+ | LLaMA2-13B-Chat | 36.2 |
274
+ | LLaMA2-70B-Chat | 44.3 |
275
+ | ChatGLM2-6B-Chat | 52.6 |
276
+ | InternLM-7B-Chat | 53.6 |
277
+ | Baichuan2-7B-Chat | 55.6 |
278
+ | Baichuan2-13B-Chat | 56.7 |
279
+ | Qwen-7B-Chat (original) (0-shot) | 54.2 |
280
+ | **Qwen-7B-Chat (0-shot)** | 59.7 |
281
+ | **Qwen-7B-Chat (5-shot)** | 59.3 |
282
+ | **Qwen-14B-Chat (0-shot)** | 69.8 |
283
+ | **Qwen-14B-Chat (5-shot)** | **71.7** |
284
+
285
+ C-Eval测试集上,Qwen-7B-Chat模型的zero-shot准确率结果如下:
286
+
287
+ The zero-shot accuracy of Qwen-7B-Chat on C-Eval testing set is provided below:
288
+
289
+ | Model | Avg. | STEM | Social Sciences | Humanities | Others |
290
+ | :---------------------- | :------: | :--: | :-------------: | :--------: | :----: |
291
+ | Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 |
292
+ | Chinese-Alpaca-2-7B | 40.3 | - | - | - | - |
293
+ | ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 |
294
+ | Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 |
295
+ | Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 |
296
+ | **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 |
297
+ | **Qwen-14B-Chat** | **69.1** | 65.1 | 80.9 | 71.2 | 63.4 |
298
+
299
+ 在7B规模模型上,经过人类指令对齐的Qwen-7B-Chat模型,准确率在同类相近规模模型中仍然处于前列。
300
+
301
+ Compared with other pretrained models with comparable model size, the human-aligned Qwen-7B-Chat performs well in C-Eval accuracy.
302
+
303
+ ### 英文评测(English Evaluation)
304
+
305
+ #### MMLU
306
+
307
+ [MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-7B-Chat模型的 0-shot & 5-shot 准确率如下,效果同样在同类对齐模型中同样表现较优。
308
+
309
+ The 0-shot & 5-shot accuracy of Qwen-7B-Chat on MMLU is provided below.
310
+ The performance of Qwen-7B-Chat still on the top between other human-aligned models with comparable size.
311
+
312
+ | Model | Avg. Acc. |
313
+ |:--------------------------------:|:---------:|
314
+ | ChatGLM2-6B-Chat | 46.0 |
315
+ | LLaMA2-7B-Chat | 46.2 |
316
+ | InternLM-7B-Chat | 51.1 |
317
+ | Baichuan2-7B-Chat | 52.9 |
318
+ | LLaMA2-13B-Chat | 54.6 |
319
+ | Baichuan2-13B-Chat | 57.3 |
320
+ | LLaMA2-70B-Chat | 63.8 |
321
+ | Qwen-7B-Chat (original) (0-shot) | 53.9 |
322
+ | **Qwen-7B-Chat (0-shot)** | 55.8 |
323
+ | **Qwen-7B-Chat (5-shot)** | 57.0 |
324
+ | **Qwen-14B-Chat (0-shot)** | 64.6 |
325
+ | **Qwen-14B-Chat (5-shot)** | **66.5** |
326
+
327
+ ### 代码评测(Coding Evaluation)
328
+
329
+ Qwen-7B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pass@1效果如下
330
+
331
+ The zero-shot Pass@1 of Qwen-7B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below
332
+
333
+ | Model | Pass@1 |
334
+ |:-----------------------:|:--------:|
335
+ | ChatGLM2-6B-Chat | 11.0 |
336
+ | LLaMA2-7B-Chat | 12.2 |
337
+ | Baichuan2-7B-Chat | 13.4 |
338
+ | InternLM-7B-Chat | 14.6 |
339
+ | Baichuan2-13B-Chat | 17.7 |
340
+ | LLaMA2-13B-Chat | 18.9 |
341
+ | LLaMA2-70B-Chat | 32.3 |
342
+ | Qwen-7B-Chat (original) | 24.4 |
343
+ | **Qwen-7B-Chat** | 37.2 |
344
+ | **Qwen-14B-Chat** | **43.9** |
345
+
346
+ ### 数学评测(Mathematics Evaluation)
347
+
348
+ 在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-7B-Chat的准确率结果如下
349
+
350
+ The accuracy of Qwen-7B-Chat on GSM8K is shown below
351
+
352
+ | Model | Acc. |
353
+ |:------------------------------------:|:--------:|
354
+ | LLaMA2-7B-Chat | 26.3 |
355
+ | ChatGLM2-6B-Chat | 28.8 |
356
+ | Baichuan2-7B-Chat | 32.8 |
357
+ | InternLM-7B-Chat | 33.0 |
358
+ | LLaMA2-13B-Chat | 37.1 |
359
+ | Baichuan2-13B-Chat | 55.3 |
360
+ | LLaMA2-70B-Chat | 59.3 |
361
+ | **Qwen-7B-Chat (original) (0-shot)** | 41.1 |
362
+ | **Qwen-7B-Chat (0-shot)** | 50.3 |
363
+ | **Qwen-7B-Chat (8-shot)** | 54.1 |
364
+ | **Qwen-14B-Chat (0-shot)** | **60.1** |
365
+ | **Qwen-14B-Chat (8-shot)** | 59.3 |
366
+
367
+ ### 长序列评测(Long-Context Understanding)
368
+
369
+ 通过NTK插值,LogN注意力缩放可以扩展Qwen-7B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-7B-Chat的Rouge-L结果如下:
370
+
371
+ **(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**
372
+
373
+ We introduce NTK-aware interpolation, LogN attention scaling to extend the context length of Qwen-7B-Chat. The Rouge-L results of Qwen-7B-Chat on long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (The average length of this dataset is around 15K) are shown below:
374
+
375
+ **(To use these tricks, please set `use_dynamic_ntk` and `use_long_attn` to true in config.json.)**
376
+
377
+ | Model | VCSUM (zh) |
378
+ |:------------------|:----------:|
379
+ | GPT-3.5-Turbo-16k | 16.0 |
380
+ | LLaMA2-7B-Chat | 0.2 |
381
+ | InternLM-7B-Chat | 13.0 |
382
+ | ChatGLM2-6B-Chat | 16.3 |
383
+ | **Qwen-7B-Chat** | **16.6** |
384
+
385
+ ### 工具使用能力的评测(Tool Usage)
386
+
387
+ #### ReAct Prompting
388
+
389
+ 千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在我们开源的、用于评估工具使用能力的评测基准上,千问的表现如下:
390
+
391
+ Qwen-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework. In our evaluation benchmark for assessing tool usage capabilities, Qwen-Chat's performance is as follows:
392
+
393
+ <table>
394
+ <tr>
395
+ <th colspan=""4"" align=""center"">Chinese Tool-Use Benchmark</th>
396
+ </tr>
397
+ <tr>
398
+ <th align=""center"">Model</th><th align=""center"">Tool Selection (Acc.↑)</th><th align=""center"">Tool Input (Rouge-L↑)</th><th align=""center"">False Positive Error↓</th>
399
+ </tr>
400
+ <tr>
401
+ <td>GPT-4</td><td align=""center"">95%</td><td align=""center"">0.90</td><td align=""center"">15.0%</td>
402
+ </tr>
403
+ <tr>
404
+ <td>GPT-3.5</td><td align=""center"">85%</td><td align=""center"">0.88</td><td align=""center"">75.0%</td>
405
+ </tr>
406
+ <tr>
407
+ <td>Qwen-7B-Chat</td><td align=""center"">98%</td><td align=""center"">0.91</td><td align=""center"">7.3%</td>
408
+ </tr>
409
+ <tr>
410
+ <td>Qwen-14B-Chat</td><td align=""center"">98%</td><td align=""center"">0.93</td><td align=""center"">2.4%</td>
411
+ </tr>
412
+ </table>
413
+
414
+ > 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。
415
+
416
+ > The plugins that appear in the evaluation set do not appear in the training set of Qwen. This benchmark evaluates the accuracy of the model in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed into the plugin, and the false positive rate. False Positive: Incorrectly invoking a plugin when it should not have been called when responding to a query.
417
+
418
+ ![](assets/react_showcase_001.png)
419
+ ![](assets/react_showcase_002.png)
420
+
421
+ #### Code Interpreter
422
+
423
+ 为了考察Qwen使用Python Code Interpreter完成数学解题、数据可视化、及文件处理与爬虫等任务的能力,我们专门建设并开源了一个评测这方面能力的[评测基准](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark)。
424
+
425
+ 我们发现Qwen在生成代码的可执行率、结果正确性上均表现较好:
426
+
427
+ To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark).
428
+
429
+ We have observed that Qwen performs well in terms of code executability and result accuracy when generating code:
430
+
431
+ <table>
432
+ <tr>
433
+ <th colspan=""4"" align=""center"">Executable Rate of Generated Code (%)</th>
434
+ </tr>
435
+ <tr>
436
+ <th align=""center"">Model</th><th align=""center"">Math↑</th><th align=""center"">Visualization↑</th><th align=""center"">General↑</th>
437
+ </tr>
438
+ <tr>
439
+ <td>GPT-4</td><td align=""center"">91.9</td><td align=""center"">85.9</td><td align=""center"">82.8</td>
440
+ </tr>
441
+ <tr>
442
+ <td>GPT-3.5</td><td align=""center"">89.2</td><td align=""center"">65.0</td><td align=""center"">74.1</td>
443
+ </tr>
444
+ <tr>
445
+ <td>LLaMA2-7B-Chat</td>
446
+ <td align=""center"">41.9</td>
447
+ <td align=""center"">33.1</td>
448
+ <td align=""center"">24.1 </td>
449
+ </tr>
450
+ <tr>
451
+ <td>LLaMA2-13B-Chat</td>
452
+ <td align=""center"">50.0</td>
453
+ <td align=""center"">40.5</td>
454
+ <td align=""center"">48.3 </td>
455
+ </tr>
456
+ <tr>
457
+ <td>CodeLLaMA-7B-Instruct</td>
458
+ <td align=""center"">85.1</td>
459
+ <td align=""center"">54.0</td>
460
+ <td align=""center"">70.7 </td>
461
+ </tr>
462
+ <tr>
463
+ <td>CodeLLaMA-13B-Instruct</td>
464
+ <td align=""center"">93.2</td>
465
+ <td align=""center"">55.8</td>
466
+ <td align=""center"">74.1 </td>
467
+ </tr>
468
+ <tr>
469
+ <td>InternLM-7B-Chat-v1.1</td>
470
+ <td align=""center"">78.4</td>
471
+ <td align=""center"">44.2</td>
472
+ <td align=""center"">62.1 </td>
473
+ </tr>
474
+ <tr>
475
+ <td>InternLM-20B-Chat</td>
476
+ <td align=""center"">70.3</td>
477
+ <td align=""center"">44.2</td>
478
+ <td align=""center"">65.5 </td>
479
+ </tr>
480
+ <tr>
481
+ <td>Qwen-7B-Chat</td>
482
+ <td align=""center"">82.4</td>
483
+ <td align=""center"">64.4</td>
484
+ <td align=""center"">67.2 </td>
485
+ </tr>
486
+ <tr>
487
+ <td>Qwen-14B-Chat</td>
488
+ <td align=""center"">89.2</td>
489
+ <td align=""center"">84.1</td>
490
+ <td align=""center"">65.5</td>
491
+ </tr>
492
+ </table>
493
+
494
+ <table>
495
+ <tr>
496
+ <th colspan=""4"" align=""center"">Accuracy of Code Execution Results (%)</th>
497
+ </tr>
498
+ <tr>
499
+ <th align=""center"">Model</th><th align=""center"">Math↑</th><th align=""center"">Visualization-Hard↑</th><th align=""center"">Visualization-Easy↑</th>
500
+ </tr>
501
+ <tr>
502
+ <td>GPT-4</td><td align=""center"">82.8</td><td align=""center"">66.7</td><td align=""center"">60.8</td>
503
+ </tr>
504
+ <tr>
505
+ <td>GPT-3.5</td><td align=""center"">47.3</td><td align=""center"">33.3</td><td align=""center"">55.7</td>
506
+ </tr>
507
+ <tr>
508
+ <td>LLaMA2-7B-Chat</td>
509
+ <td align=""center"">3.9</td>
510
+ <td align=""center"">14.3</td>
511
+ <td align=""center"">39.2 </td>
512
+ </tr>
513
+ <tr>
514
+ <td>LLaMA2-13B-Chat</td>
515
+ <td align=""center"">8.3</td>
516
+ <td align=""center"">8.3</td>
517
+ <td align=""center"">40.5 </td>
518
+ </tr>
519
+ <tr>
520
+ <td>CodeLLaMA-7B-Instruct</td>
521
+ <td align=""center"">14.3</td>
522
+ <td align=""center"">26.2</td>
523
+ <td align=""center"">60.8 </td>
524
+ </tr>
525
+ <tr>
526
+ <td>CodeLLaMA-13B-Instruct</td>
527
+ <td align=""center"">28.2</td>
528
+ <td align=""center"">27.4</td>
529
+ <td align=""center"">62.0 </td>
530
+ </tr>
531
+ <tr>
532
+ <td>InternLM-7B-Chat-v1.1</td>
533
+ <td align=""center"">28.5</td>
534
+ <td align=""center"">4.8</td>
535
+ <td align=""center"">40.5 </td>
536
+ </tr>
537
+ <tr>
538
+ <td>InternLM-20B-Chat</td>
539
+ <td align=""center"">34.6</td>
540
+ <td align=""center"">21.4</td>
541
+ <td align=""center"">45.6 </td>
542
+ </tr>
543
+ <tr>
544
+ <td>Qwen-7B-Chat</td>
545
+ <td align=""center"">41.9</td>
546
+ <td align=""center"">40.5</td>
547
+ <td align=""center"">54.4 </td>
548
+ </tr>
549
+ <tr>
550
+ <td>Qwen-14B-Chat</td>
551
+ <td align=""center"">58.4</td>
552
+ <td align=""center"">53.6</td>
553
+ <td align=""center"">59.5</td>
554
+ </tr>
555
+ </table>
556
+
557
+ <p align=""center"">
558
+ <br>
559
+ <img src=""assets/code_interpreter_showcase_001.jpg"" />
560
+ <br>
561
+ </p>
562
+
563
+ #### Huggingface Agent
564
+
565
+ 千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下:
566
+
567
+ Qwen-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). Its performance on the run-mode benchmark provided by HuggingFace is as follows:
568
+
569
+ <table>
570
+ <tr>
571
+ <th colspan=""4"" align=""center"">HuggingFace Agent Benchmark - Run Mode</th>
572
+ </tr>
573
+ <tr>
574
+ <th align=""center"">Model</th><th align=""center"">Tool Selection↑</th><th align=""center"">Tool Used↑</th><th align=""center"">Code↑</th>
575
+ </tr>
576
+ <tr>
577
+ <td>GPT-4</td><td align=""center"">100</td><td align=""center"">100</td><td align=""center"">97.4</td>
578
+ </tr>
579
+ <tr>
580
+ <td>GPT-3.5</td><td align=""center"">95.4</td><td align=""center"">96.3</td><td align=""center"">87.0</td>
581
+ </tr>
582
+ <tr>
583
+ <td>StarCoder-Base-15B</td><td align=""center"">86.1</td><td align=""center"">87.0</td><td align=""center"">68.9</td>
584
+ </tr>
585
+ <tr>
586
+ <td>StarCoder-15B</td><td align=""center"">87.0</td><td align=""center"">88.0</td><td align=""center"">68.9</td>
587
+ </tr>
588
+ <tr>
589
+ <td>Qwen-7B-Chat</td><td align=""center"">87.0</td><td align=""center"">87.0</td><td align=""center"">71.5</td>
590
+ </tr>
591
+ <tr>
592
+ <td>Qwen-14B-Chat</td><td align=""center"">93.5</td><td align=""center"">94.4</td><td align=""center"">87.0</td>
593
+ </tr>
594
+ </table>
595
+
596
+ <table>
597
+ <tr>
598
+ <th colspan=""4"" align=""center"">HuggingFace Agent Benchmark - Chat Mode</th>
599
+ </tr>
600
+ <tr>
601
+ <th align=""center"">Model</th><th align=""center"">Tool Selection↑</th><th align=""center"">Tool Used↑</th><th align=""center"">Code↑</th>
602
+ </tr>
603
+ <tr>
604
+ <td>GPT-4</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">98.5</td>
605
+ </tr>
606
+ <tr>
607
+ <td>GPT-3.5</td><td align=""center"">97.3</td><td align=""center"">96.8</td><td align=""center"">89.6</td>
608
+ </tr>
609
+ <tr>
610
+ <td>StarCoder-Base-15B</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">91.1</td>
611
+ </tr>
612
+ <tr>
613
+ <td>StarCoder-15B</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">89.6</td>
614
+ </tr>
615
+ <tr>
616
+ <td>Qwen-7B-Chat</td><td align=""center"">94.7</td><td align=""center"">94.7</td><td align=""center"">85.1</td>
617
+ </tr>
618
+ <tr>
619
+ <td>Qwen-14B-Chat</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">95.5</td>
620
+ </tr>
621
+ </table>
622
+
623
+ <br>
624
+
625
+ ## x86 平台 (x86 Platforms)
626
+ 在 酷睿™/至强® 可扩展处理器或 Arc™ GPU 上部署量化模型时,建议使用 [OpenVINO™ Toolkit](https://docs.openvino.ai/2023.3/gen_ai_guide.html)以充分利用硬件,实现更好的推理性能。您可以安装并运行此 [example notebook](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/254-llm-chatbot)。相关问题,您可在[OpenVINO repo](https://github.com/openvinotoolkit/openvino_notebooks/issues)中提交。
627
+
628
+ When deploying on Core™/Xeon® Scalable Processors or with Arc™ GPU, [OpenVINO™ Toolkit](https://docs.openvino.ai/2023.3/gen_ai_guide.html) is recommended. You can install and run this [example notebook](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/254-llm-chatbot). For related issues, you are welcome to file an issue at [OpenVINO repo](https://github.com/openvinotoolkit/openvino_notebooks/issues).
629
+
630
+ ## FAQ
631
+
632
+ 如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
633
+
634
+ If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search a solution before you launch a new issue.
635
+ <br>
636
+
637
+ ## 引用 (Citation)
638
+
639
+ 如果你觉得我们的工作对你有帮助,欢迎引用!
640
+
641
+ If you find our work helpful, feel free to give us a cite.
642
+
643
+ ```
644
+ @article{qwen,
645
+ title={Qwen Technical Report},
646
+ author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
647
+ journal={arXiv preprint arXiv:2309.16609},
648
+ year={2023}
649
+ }
650
+ ```
651
+ <br>
652
+
653
+ ## 使用协议(License Agreement)
654
+
655
+ 我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/qianwen)申请。
656
+
657
+ Our code and checkpoints are open to research purpose, and they are allowed for commercial purposes. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/qianwen) to apply.
658
+ <br>
659
+
660
+ ## 联系我们(Contact Us)
661
+
662
+ 如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
663
+
664
+ If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
665
+
666
+ ","{""id"": ""Qwen/Qwen-7B-Chat"", ""author"": ""Qwen"", ""sha"": ""93a65d34827a3cc269b727e67004743b723e2f83"", ""last_modified"": ""2024-03-19 10:09:52+00:00"", ""created_at"": ""2023-08-03 03:01:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 29060, ""downloads_all_time"": null, ""likes"": 770, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""arxiv:2305.08322"", ""arxiv:2009.03300"", ""arxiv:2305.05280"", ""arxiv:2210.03629"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/code_interpreter_showcase_001.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/react_showcase_001.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/react_showcase_002.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/react_prompt.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00008-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""LanguageBind/MoE-LLaVA"", ""ZhangYuhan/3DGen-Arena"", ""gsaivinay/open_llm_leaderboard"", ""KBaba7/Quant"", ""mikeee/qwen-7b-chat"", ""EmbeddedLLM/chat-template-generation"", ""Justinrune/LLaMA-Factory"", ""yhavinga/dutch-tokenizer-arena"", ""kenken999/fastapi_django_main_live"", ""lightmate/llm-chatbot"", ""ali-vilab/IDEA-Bench-Arena"", ""bhaskartripathi/LLM_Quantization"", ""officialhimanshu595/llama-factory"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""li-qing/FIRE"", ""Zulelee/langchain-chatchat"", ""xu-song/kplug"", ""justest/GPT-Academic-with-B3n-AI"", ""calvinchaochao/text_generation"", ""llmbb/LLMBB-Agent"", ""hzwluoye/gpt-academic"", ""tianleliphoebe/visual-arena"", ""Docfile/open_llm_leaderboard"", ""Ashmal/MobiLlama"", ""xun/Qwen-Token-Calc"", ""ruslanmv/convert_to_gguf"", ""malvika2003/openvino_notebooks"", ""Yiyuan/VSA"", ""IS2Lab/S-Eval"", ""vitalyr/gpt-academic"", ""PegaMichael/Taiwan-LLaMa2-Copy"", ""cming0420/gpt-academic"", ""hengkai/gpt-academic"", ""tjtanaa/chat-template-generation"", ""kuxian/gpt-academic"", ""DrBadass/gpt-academic"", ""qinglin96/gpt-academic3.6"", ""QLWD/gpt-academic"", ""darren1231/gpt-academic_2"", ""CaiRou-Huang/TwLLM7B-v2.0-base"", ""shuozhang2/Monkey"", ""DuanSuKa/gpt-academic2"", ""Leachim/gpt-academic"", ""BuzzHr/gpt-academic002"", 
""durukan/gptacademic"", ""pallavijaini/NeuralChat-LLAMA-POC"", ""bibimbap/Qwen-7B-Chat"", ""xiaohua1011/gpt-academic"", ""Havi999/tongyi"", ""zhaomuqing/gpt-academic"", ""blackwingedkite/gutalk"", ""zhlinh/gpt-academic"", ""SincoMao/test"", ""cllatMTK/Breeze"", ""znskiss/Qwen-7B-main"", ""forever-yu/gpt-academic"", ""BuzzHr/gpt-academic001"", ""flatindo/titles"", ""nengrenjie83/MedicalGPT-main"", ""pngwn/open_llm_leaderboard_two"", ""wuhaibo/Qwen-7B-Chat"", ""SevenQin/cmkj-gpt"", ""Cran-May/qwen-7b-chat"", ""Ya2023/neurobot"", ""gordonchan/embedding-m3e-large"", ""qgyd2021/qwen_7b_chinese_modern_poetry"", ""JiakunXu/chat_with_llm"", ""blackwingedkite/alpaca2_clas"", ""Nymbo/MoE-LLaVA"", ""azurice/gpt-playground"", ""zouhsab/TinnyADLLAVA"", ""jaekwon/intel_cpu_chat"", ""whuib/gpt-academic"", ""liang-huggingface/PubmedSearch"", ""lianglv/NeuralChat-ICX-INT4"", ""Bofeee5675/FIRE"", ""evelyn-lo/evelyn"", ""thepianist9/Linly"", ""yuantao-infini-ai/demo_test"", ""pennxp/qianwen"", ""zjasper666/bf16_vs_fp8"", ""Hndsguy/813-MindSearch"", ""martinakaduc/melt"", ""JiakaiDu/RAG_Test"", ""mnsak/pro"", ""mnsak/project_agents"", ""macota1/axa"", ""Superkingjcj/Lagent"", ""Vic-729/weathersearch_agent"", ""SEUZCYYDS/Lagent"", ""Chipsleep/hgagent"", ""Jianfei217/MultiAgents"", ""Gon04/lagent_demo"", ""kai119/lagent"", ""sfang32/Agent_Based_on_Lagent"", ""Jianfei217/MulAgent"", ""lt676767/Lagent"", ""xiaoxishui/LAGENT""], ""safetensors"": {""parameters"": {""BF16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-19 10:09:52+00:00"", ""cardData"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", 
""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64cb188b96a31741f19ef6b4"", ""modelId"": ""Qwen/Qwen-7B-Chat"", ""usedStorage"": 46329475248}",0,https://huggingface.co/stvlynn/Qwen-7B-Chat-Cantonese,1,"https://huggingface.co/Jungwonchang/Ko-QWEN-7B-Chat-LoRA, https://huggingface.co/xjw1001002/Qwen7B_brand_model, https://huggingface.co/xjw1001002/Qwen_match_price_V2, https://huggingface.co/yooshijay/qwen-7B_psychat, https://huggingface.co/xjw1001002/price_tag_V3_qwen7b, https://huggingface.co/ggkk2012/qwen7b-finance, https://huggingface.co/kyriemao/chatretriever, https://huggingface.co/lyy14011305/firefly-qwen-7b-sft-qlora, https://huggingface.co/Zetsu00/qwen-7b-lora-simplifier-v2, https://huggingface.co/Zetsu00/Qwen-7B-QLoRA-simplifier",10,"https://huggingface.co/TheBloke/Qwen-7B-Chat-AWQ, https://huggingface.co/mradermacher/Qwen-7B-Chat-GGUF, https://huggingface.co/mradermacher/Qwen-7B-Chat-i1-GGUF, https://huggingface.co/TheBloke/Qwen-7B-Chat-GPTQ, https://huggingface.co/Xorbits/Qwen-7B-Chat-GGUF, https://huggingface.co/mlc-ai/Qwen-7B-Chat-q4f16_1-MLC, https://huggingface.co/mlc-ai/Qwen-7B-Chat-q4f32_1-MLC",7,,0,"FallnAI/Quantize-HF-Models, Justinrune/LLaMA-Factory, KBaba7/Quant, ZhangYuhan/3DGen-Arena, Zulelee/langchain-chatchat, ali-vilab/IDEA-Bench-Arena, bhaskartripathi/LLM_Quantization, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/151, kenken999/fastapi_django_main_live, lightmate/llm-chatbot, mikeee/qwen-7b-chat, yhavinga/dutch-tokenizer-arena",13
667
+ stvlynn/Qwen-7B-Chat-Cantonese,"---
668
+ license: agpl-3.0
669
+ datasets:
670
+ - stvlynn/Cantonese-Dialogue
671
+ language:
672
+ - zh
673
+ pipeline_tag: text-generation
674
+ tags:
675
+ - Cantonese
676
+ - 廣東話
677
+ - 粤语
678
+ base_model: Qwen/Qwen-7B-Chat
679
+ ---
680
+
681
+ # Qwen-7B-Chat-Cantonese (通义千问·粤语)
682
+ ## Intro
683
+ Qwen-7B-Chat-Cantonese is a fine-tuned version based on Qwen-7B-Chat, trained on a substantial amount of Cantonese language data.
684
+
685
+ Qwen-7B-Chat-Cantonese係基於Qwen-7B-Chat嘅微調版本,基於大量粵語數據進行訓練。
686
+
687
+ [ModelScope(魔搭社区)](https://www.modelscope.cn/models/stvlynn/Qwen-7B-Chat-Cantonese)
688
+
689
+ ## Usage
690
+
691
+ ### Requirements
692
+
693
+ * python 3.8 and above
694
+ * pytorch 1.12 and above, 2.0 and above are recommended
695
+ * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
696
+
697
+ ### Dependency
698
+
699
+ To run Qwen-7B-Chat-Cantonese, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
700
+
701
+ ```bash
702
+ pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
703
+ ```
704
+
705
+ In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
706
+
707
+ ```bash
708
+ git clone https://github.com/Dao-AILab/flash-attention
709
+ cd flash-attention && pip install .
710
+ ```
711
+
712
+ ### Quickstart
713
+
714
+ Pls turn to QwenLM/Qwen - [Quickstart](https://github.com/QwenLM/Qwen?tab=readme-ov-file#quickstart)
715
+
716
+ ## Training Parameters
717
+
718
+ | Parameter | Description | Value |
719
+ |-----------------|----------------------------------------|--------|
720
+ | Learning Rate | AdamW optimizer learning rate | 7e-5 |
721
+ | Weight Decay | Regularization strength | 0.8 |
722
+ | Gamma | Learning rate decay factor | 1.0 |
723
+ | Batch Size | Number of samples per batch | 1000 |
724
+ | Precision | Floating point precision | fp16 |
725
+ | Learning Policy | Learning rate adjustment policy | cosine |
726
+ | Warmup Steps | Initial steps without learning rate adjustment | 0 |
727
+ | Total Steps | Total training steps | 1024 |
728
+ | Gradient Accumulation Steps | Number of steps to accumulate gradients before updating | 8 |
729
+
730
+ ![loss](https://cdn.statically.io/gh/stvlynn/cloudimg@master/blog/2310/image.q9v1ak08ljk.webp)
731
+
732
+ ## Demo
733
+ ![深水埗有哪些美食](https://cdn.statically.io/gh/stvlynn/cloudimg@master/blog/2310/截屏2024-05-04-11.59.27.2bea6k113e68.webp)
734
+
735
+ ![鲁迅为什么打周树人](https://cdn.statically.io/gh/stvlynn/cloudimg@master/blog/2310/截屏2024-05-04-11.56.46.72tt5czl2gw0.webp)
736
+
737
+ ![树上几只鸟](https://cdn.statically.io/gh/stvlynn/cloudimg@master/blog/2310/截屏2024-05-04-12.00.38.267hvmc3z3c0.webp)
738
+
739
+ ## Special Note
740
+
741
+ This is my first fine-tuning LLM project. Pls forgive me if there's anything wrong.
742
+
743
+ If you have any questions or suggestions, feel free to contact me.
744
+
745
+ [Twitter @stv_lynn](https://x.com/stv_lynn)
746
+
747
+ [Telegram @stvlynn](https://t.me/stvlynn)
748
+
749
+ [email i@stv.pm](mailto://i@stv.pm)","{""id"": ""stvlynn/Qwen-7B-Chat-Cantonese"", ""author"": ""stvlynn"", ""sha"": ""0bc29be620b3337fc3e9a91262cb2804c325e25a"", ""last_modified"": ""2024-05-06 09:36:16+00:00"", ""created_at"": ""2024-05-04 02:11:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 147, ""downloads_all_time"": null, ""likes"": 23, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""feature-extraction"", ""Cantonese"", ""\u5ee3\u6771\u8a71"", ""\u7ca4\u8bed"", ""text-generation"", ""conversational"", ""custom_code"", ""zh"", ""dataset:stvlynn/Cantonese-Dialogue"", ""base_model:Qwen/Qwen-7B-Chat"", ""base_model:finetune:Qwen/Qwen-7B-Chat"", ""license:agpl-3.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-7B-Chat\ndatasets:\n- stvlynn/Cantonese-Dialogue\nlanguage:\n- zh\nlicense: agpl-3.0\npipeline_tag: text-generation\ntags:\n- Cantonese\n- \u5ee3\u6771\u8a71\n- \u7ca4\u8bed"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModel"": ""modeling_qwen.QWenLMHeadModel"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""chat_template"": ""{% set system_message = 'You are a helpful assistant.' 
%}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|im_end|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00008.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-06 09:36:16+00:00"", ""cardData"": ""base_model: Qwen/Qwen-7B-Chat\ndatasets:\n- stvlynn/Cantonese-Dialogue\nlanguage:\n- zh\nlicense: agpl-3.0\npipeline_tag: text-generation\ntags:\n- Cantonese\n- \u5ee3\u6771\u8a71\n- \u7ca4\u8bed"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""66359937ccadfaaeac8cae0f"", ""modelId"": ""stvlynn/Qwen-7B-Chat-Cantonese"", ""usedStorage"": 15442677024}",1,,0,,0,"https://huggingface.co/mradermacher/Qwen-7B-Chat-Cantonese-GGUF, https://huggingface.co/mradermacher/Qwen-7B-Chat-Cantonese-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=stvlynn/Qwen-7B-Chat-Cantonese&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bstvlynn%2FQwen-7B-Chat-Cantonese%5D(%2Fstvlynn%2FQwen-7B-Chat-Cantonese)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Qwen-7B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Qwen/Qwen-7B,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - qwen
8
+ pipeline_tag: text-generation
9
+ inference: false
10
+ license: other
11
+ license_name: tongyi-qianwen-license-agreement
12
+ license_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
13
+ ---
14
+
15
+ # Qwen-7B
16
+
17
+ <p align=""center"">
18
+ <img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
19
+ <p>
20
+ <br>
21
+
22
+ <p align=""center"">
23
+ 🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary"">Demo</a>
24
+ <br>
25
+ <a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href=""https://dashscope.aliyun.com"">API</a>
26
+ </p>
27
+ <br>
28
+
29
+ ## 介绍 (Introduction)
30
+
31
+ **通义千问-7B(Qwen-7B)**是阿里云研发的通义千问大模型系列的70亿参数规模的模型。Qwen-7B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-7B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-7B-Chat。相较于最初开源的Qwen-7B模型,我们现已将预训练模型和Chat模型更新到效果更优的版本。本仓库为Qwen-7B预训练模型的仓库。
32
+
33
+ 通义千问-7B(Qwen-7B)主要有以下特点:
34
+
35
+ 1. **大规模高质量训练语料**:使用超过2.4万亿tokens的数据进行预训练,包含高质量中、英、多语言、代码、数学等数据,涵盖通用及专业领域的训练语料。通过大量对比实验对预训练语料分布进行了优化。
36
+ 2. **强大的性能**:Qwen-7B在多个中英文下游评测任务上(涵盖常识推理、代码、数学、翻译等),效果显著超越现有的相近规模开源模型,甚至在部分指标上相比更大尺寸模型也有较强竞争力。具体评测结果请详见下文。
37
+ 3. **覆盖更全面的词表**:相比目前以中英词表为主的开源模型,Qwen-7B使用了约15万大小的词表。该词表对多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强和扩展。
38
+
39
+ 如果您想了解更多关于通义千问7B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
40
+
41
+ **Qwen-7B** is the 7B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-7B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-7B, we release Qwen-7B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. Now we have updated both our pretrained and chat models for better performances. This repository is the one for the Qwen-7B base language model.
42
+
43
+ The features of Qwen-7B include:
44
+
45
+ 1. **Large-scale high-quality training corpora**: It is pretrained on over 2.4 trillion tokens, including Chinese, English, multilingual texts, code, and mathematics, covering general and professional fields. The distribution of the pre-training corpus has been optimized through a large number of ablation experiments.
46
+ 2. **Competitive performance**: It significantly surpasses existing open-source models of similar scale on multiple Chinese and English downstream evaluation tasks (including commonsense, reasoning, code, mathematics, etc.), and even surpasses some larger-scale models in several benchmarks. See below for specific evaluation results.
47
+ 3. **More comprehensive vocabulary coverage**: Compared with other open-source models based on Chinese and English vocabularies, Qwen-7B uses a vocabulary of over 150K tokens. This vocabulary is more friendly to multiple languages, enabling users to directly further enhance the capability for certain languages without expanding the vocabulary.
48
+
49
+ For more details about Qwen, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
50
+ <br>
51
+
52
+ ## 要求(Requirements)
53
+
54
+ * python 3.8及以上版本
55
+ * pytorch 1.12及以上版本,推荐2.0及以上版本
56
+ * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
57
+ * python 3.8 and above
58
+ * pytorch 1.12 and above, 2.0 and above are recommended
59
+ * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
60
+ <br>
61
+
62
+ ## 依赖项 (Dependency)
63
+
64
+ 运行Qwen-7B,请确保满足上述要求,再执行以下pip命令安装依赖库
65
+
66
+ To run Qwen-7B, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
67
+
68
+ ```bash
69
+ pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
70
+ ```
71
+
72
+ 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
73
+
74
+ In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
75
+
76
+ ```bash
77
+ git clone https://github.com/Dao-AILab/flash-attention
78
+ cd flash-attention && pip install .
79
+ # 下方安装可选,安装可能比较缓慢。
80
+ # pip install csrc/layer_norm
81
+ # pip install csrc/rotary
82
+ ```
83
+ <br>
84
+
85
+ ## 快速使用(Quickstart)
86
+
87
+ 您可以通过以下代码轻松调用:
88
+
89
+ You can easily call the model with the following code:
90
+
91
+ ```python
92
+ from transformers import AutoModelForCausalLM, AutoTokenizer
93
+ from transformers.generation import GenerationConfig
94
+
95
+ # Note: The default behavior now has injection attack prevention off.
96
+ tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-7B"", trust_remote_code=True)
97
+
98
+ # use bf16
99
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
100
+ # use fp16
101
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
102
+ # use cpu only
103
+ # model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B"", device_map=""cpu"", trust_remote_code=True).eval()
104
+ # use auto mode, automatically select precision based on the device.
105
+ model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-7B"", device_map=""auto"", trust_remote_code=True).eval()
106
+
107
+ # Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
108
+ # model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-7B"", trust_remote_code=True)
109
+
110
+ inputs = tokenizer('蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是', return_tensors='pt')
111
+ inputs = inputs.to(model.device)
112
+ pred = model.generate(**inputs)
113
+ print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
114
+ # 蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是亚的斯亚贝巴(Addis Ababa)...
115
+ ```
116
+
117
+ 关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
118
+
119
+ For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information.
120
+ <br>
121
+
122
+ ## Tokenizer
123
+
124
+ > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。
125
+
126
+ 基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。
127
+
128
+ Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md).
129
+ <br>
130
+
131
+ ## 模型细节 (Model)
132
+
133
+ Qwen-7B模型规模基本情况如下所示。
134
+
135
+ The details of the model architecture of Qwen-7B are listed as follows.
136
+
137
+ | Hyperparameter | Value |
138
+ |:----------------|:-------|
139
+ | n_layers | 32 |
140
+ | n_heads | 32 |
141
+ | d_model | 4096 |
142
+ | vocab size | 151851 |
143
+ | sequence length | 8192 |
144
+
145
+ 在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
146
+ 即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
147
+
148
+ 在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-7B使用了超过15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。
149
+ 词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
150
+
151
+ 我们从部分语种各随机抽取100万个文档语料,以对比不同模型的编码压缩率(以支持100语种的XLM-R为基准值1,越低越好),具体性能见图。
152
+
153
+ 可以看到Qwen-7B在保持中英代码高效解码的前提下,对部分使用人群较多的语种(泰语th、希伯来语he、阿拉伯语ar、韩语ko、越南语vi、日语ja、土耳其语tr、印尼语id、波兰语pl��俄语ru、荷兰语nl、葡萄牙语pt、意大利语it、德语de、西班牙语es、法语fr等)上也实现了较高的压缩率,使得模型在这些语种上也具备较强的可扩展性和较高的训练和推理效率。
154
+
155
+ 在预训练数据方面,去重及过滤后的语料超过2.4T tokens,囊括全网文本、百科、书籍、代码、数学及各个领域垂类。
156
+
157
+ <p align=""center"">
158
+ <img src=""assets/tokenizer.png"" style=""width: 1200px""/>
159
+ <p>
160
+
161
+ For position encoding, FFN activation function, and normalization methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
162
+
163
+ For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-7B uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary. It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
164
+
165
+ We randomly selected 1 million document corpus of each language to test and compare the encoding compression rates of different models (with XLM-R, which supports 100 languages, as the base value 1). The specific performance is shown in the figure above.
166
+
167
+ As can be seen, while ensuring the efficient decoding of Chinese, English, and code, Qwen-7B also achieves a high compression rate for many other languages (such as th, he, ar, ko, vi, ja, tr, id, pl, ru, nl, pt, it, de, es, fr etc.), equipping the model with strong scalability as well as high training and inference efficiency in these languages.
168
+
169
+ The scale of pretraining corpus reaches over 2.4T tokens after deduplication and filtration, encompassing web text, encyclopedia, books, code, mathematics, and various domains.
170
+ <br>
171
+
172
+ ## 评测效果(Evaluation)
173
+ 我们选取了MMLU,C-Eval,GSM8K, MATH, HumanEval, MBPP, BBH, CMMLU等目前较流行的benchmark,对模型的中英知识能力、翻译、数学推理、代码等能力进行综合评测。从下列结果可以看到Qwen模型在所有benchmark上均取得了同级别开源模型中的最优表现。
174
+
175
+ We selected MMLU, C-Eval, GSM8K, MATH, HumanEval, MBPP, BBH, CMMLU, which are currently popular benchmarks, to test the model’s Chinese and English knowledge capabilities, translation, mathematical reasoning, coding and other capabilities. From the following comprehensive evaluation results, we can see that the Qwen model outperform the similarly sized open-source models on all tasks.
176
+
177
+ | Model | MMLU | C-Eval | GSM8K | MATH | HumanEval | MBPP | BBH | CMMLU |
178
+ |:-------------------|:--------:|:--------:|:--------:|:--------:|:---------:|:--------:|:--------:|:--------:|
179
+ | | 5-shot | 5-shot | 8-shot | 4-shot | 0-shot | 3-shot | 3-shot | 5-shot |
180
+ | LLaMA2-7B | 46.8 | 32.5 | 16.7 | 3.3 | 12.8 | 20.8 | 38.2 | 31.8 |
181
+ | LLaMA2-13B | 55.0 | 41.4 | 29.6 | 5.0 | 18.9 | 30.3 | 45.6 | 38.4 |
182
+ | LLaMA2-34B | 62.6 | - | 42.2 | 6.2 | 22.6 | 33.0 | 44.1 | - |
183
+ | ChatGLM2-6B | 47.9 | 51.7 | 32.4 | 6.5 | - | - | 33.7 | - |
184
+ | InternLM-7B | 51.0 | 53.4 | 31.2 | 6.3 | 10.4 | 14.0 | 37.0 | 51.8 |
185
+ | InternLM-20B | 62.1 | 58.8 | 52.6 | 7.9 | 25.6 | 35.6 | 52.5 | 59.0 |
186
+ | Baichuan2-7B | 54.7 | 56.3 | 24.6 | 5.6 | 18.3 | 24.2 | 41.6 | 57.1 |
187
+ | Baichuan2-13B | 59.5 | 59.0 | 52.8 | 10.1 | 17.1 | 30.2 | 49.0 | 62.0 |
188
+ | Qwen-7B (original) | 56.7 | 59.6 | 51.6 | - | 24.4 | 31.2 | 40.6 | 58.8 |
189
+ | **Qwen-7B** | 58.2 | 63.5 | 51.7 | 11.6 | 29.9 | 31.6 | 45.0 | 62.2 |
190
+ | **Qwen-14B** | **66.3** | **72.1** | **61.3** | **24.8** | **32.3** | **40.8** | **53.4** | **71.0** |
191
+
192
+ ### 长序列评测(Long-Context Evaluation)
193
+
194
+ 我们引入NTK插值,LogN注意力缩放,窗口注意力等技巧,将Qwen-7B (original)和14B模型的上下文长度从2K扩展到8K以上,将Qwen-7B从8K扩到32K。在arXiv数据上使用PPL指标测试Qwen-7B和Qwen-14B在不同长度下的表现,结果如下:
195
+
196
+ **(若要启用NTK和LogN注意力缩放,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**
197
+
198
+ We introduce NTK-aware interpolation, LogN attention scaling, Window attention, etc. to extend the context length to over 8K tokens. We conduct language modeling experiments on the arXiv dataset with the PPL evaluation. Results are demonstrated below:
199
+
200
+ **(To use NTK interpolation and LogN scaling, please set `use_dynamic_ntk` and `use_long_attn` to true in config.json.)**
201
+ <table>
202
+ <tr>
203
+ <th rowspan=""2"">Model</th><th colspan=""6"" align=""center"">Sequence Length</th>
204
+ </tr>
205
+ <tr>
206
+ <th align=""center"">1024</th><th align=""center"">2048</th><th align=""center"">4096</th><th align=""center"">8192</th><th align=""center"">16384</th><th align=""center"">32768</th>
207
+ </tr>
208
+ <tr>
209
+ <td>Qwen-7B (original)</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">39.35</td><td align=""center"">469.81</td><td align=""center"">2645.09</td><td align=""center"">-</td>
210
+ </tr>
211
+ <tr>
212
+ <td>+ dynamic_ntk</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.59</td><td align=""center"">3.66</td><td align=""center"">5.71</td><td align=""center"">-</td>
213
+ </tr>
214
+ <tr>
215
+ <td>+ dynamic_ntk + logn</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.58</td><td align=""center"">3.56</td><td align=""center"">4.62</td><td align=""center"">-</td>
216
+ </tr>
217
+ <tr>
218
+ <td>+ dynamic_ntk + logn + window_attn</td><td align=""center"">4.23</td><td align=""center"">3.78</td><td align=""center"">3.58</td><td align=""center"">3.49</td><td align=""center"">4.32</td><td align=""center"">-</td>
219
+ </tr>
220
+ <tr>
221
+ <tr>
222
+ <td>Qwen-7B</td><td align=""center""><b>4.23</b></td><td align=""center""><b>3.81</b></td><td align=""center""><b>3.52</b></td><td align=""center""><b>3.31</b></td><td align=""center"">7.27</td><td align=""center"">181.49</td>
223
+ </tr>
224
+ <tr>
225
+ <td>+ dynamic_ntk + logn + window_attn</td><td align=""center""><b>4.23</b></td><td align=""center""><b>3.81</b></td><td align=""center""><b>3.52</b></td><td align=""center""><b>3.33</b></td><td align=""center""><b>3.22</b></td><td align=""center""><b>3.17</b></td>
226
+ </tr>
227
+ <tr>
228
+ <td>Qwen-14B</td><td align=""center""><b>-</b></td><td align=""center""><b>3.46</b></td><td align=""center"">22.79</td><td align=""center"">334.65</td><td align=""center"">3168.35</td><td align=""center"">-</td>
229
+ </tr>
230
+ <tr>
231
+ <td>+ dynamic_ntk + logn + window_attn</td><td align=""center""><b>-</b></td><td align=""center""><b>3.46</b></td><td align=""center""><b>3.29</b></td><td align=""center""><b>3.18</b></td><td align=""center"">3.42</td><td align=""center"">-</td>
232
+ </tr>
233
+ </table>
234
+
235
+ ## 评测复现(Reproduction)
236
+
237
+ 我们提供了评测脚本,方便大家复现模型效果,详见[链接](https://github.com/QwenLM/Qwen/tree/main/eval)。提示:由于硬件和框架造成的舍入误差,复现结果如有小幅波动属于正常现象。
238
+
239
+ We have provided evaluation scripts to reproduce the performance of our model, details as [link](https://github.com/QwenLM/Qwen/tree/main/eval).
240
+ <br>
241
+
242
+ ## FAQ
243
+
244
+ 如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
245
+
246
+ If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search a solution before you launch a new issue.
247
+ <br>
248
+
249
+ ## 引用 (Citation)
250
+
251
+ 如果你觉得我们的工作对你有帮助,欢迎引用!
252
+
253
+ If you find our work helpful, feel free to give us a cite.
254
+
255
+ ```
256
+ @article{qwen,
257
+ title={Qwen Technical Report},
258
+ author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
259
+ journal={arXiv preprint arXiv:2309.16609},
260
+ year={2023}
261
+ }
262
+ ```
263
+ <br>
264
+
265
+ ## 使用协议(License Agreement)
266
+
267
+ 我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/qianwen)申请。
268
+
269
+ Our code and checkpoints are open to research purpose, and they are allowed for commercial purposes. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/qianwen) to apply.
270
+ <br>
271
+
272
+ ## 联系我们(Contact Us)
273
+
274
+ 如果你想给我们的研发团队和产品团队���言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
275
+
276
+ If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
277
+
278
+ ","{""id"": ""Qwen/Qwen-7B"", ""author"": ""Qwen"", ""sha"": ""ef3c5c9c57b252f3149c1408daf4d649ec8b6c85"", ""last_modified"": ""2024-01-04 06:42:50+00:00"", ""created_at"": ""2023-08-03 02:51:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 22865, ""downloads_all_time"": null, ""likes"": 381, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/qwen_tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""LanguageBind/MoE-LLaVA"", ""gsaivinay/open_llm_leaderboard"", ""Intel/powered_by_intel_llm_leaderboard"", ""KBaba7/Quant"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""bhaskartripathi/LLM_Quantization"", ""officialhimanshu595/llama-factory"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""Docfile/open_llm_leaderboard"", ""xun/Qwen-Token-Calc"", ""ruslanmv/convert_to_gguf"", ""alexkueck/LIRAGTest"", ""shuozhang2/Monkey"", ""xsnames/tag_tool"", ""alexkueck/LIRAGTBackup"", ""bibimbap/Qwen-7B-Chat"", ""blackwingedkite/gutalk"", ""znskiss/Qwen-7B-main"", ""pngwn/open_llm_leaderboard_two"", ""wuhaibo/Qwen-7B-Chat"", ""SevenQin/cmkj-gpt"", ""Ya2023/neurobot"", ""qgyd2021/qwen_7b_chinese_modern_poetry"", ""larsthepenguin/trt-llm-rag-windows-main"", ""blackwingedkite/alpaca2_clas"", ""Nymbo/MoE-LLaVA"", ""alexkueck/RAGDEV"", ""azurice/gpt-playground"", ""zouhsab/TinnyADLLAVA"", ""alexkueck/SucheRAG"", ""thepianist9/Linly"", ""alexkueck/SucheDemo"", ""pennxp/qianwen"", ""Av77/testav"", ""Arslan17121/AudioBookChV2"", ""MandlaZwane/Shanks"", ""nnilayy/powered_by_intel_llm_leaderboard"", ""edwaldosr/paper-summary"", ""lordwhisker/pdf-review-tool"", ""msun415/Llamole"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-04 06:42:50+00:00"", ""cardData"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- 
qwen\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64cb162638837b12d50a02c9"", ""modelId"": ""Qwen/Qwen-7B"", ""usedStorage"": 75249548558}",0,"https://huggingface.co/rinna/nekomata-7b, https://huggingface.co/Locutusque/UltraQwen-7B, https://huggingface.co/mohit95559/mymodel",3,"https://huggingface.co/quyanh/qwen-7b-neurips-v1, https://huggingface.co/fengn/llama-factory-test, https://huggingface.co/fromson99/FinMindAI_model, https://huggingface.co/DanaCC852/Cantonese_Tarot_v2",4,"https://huggingface.co/QLWD/RepoShiled-7b-AWQ, https://huggingface.co/mradermacher/Qwen-7B-GGUF, https://huggingface.co/mradermacher/Qwen-7B-i1-GGUF",3,,0,"FallnAI/Quantize-HF-Models, Intel/powered_by_intel_llm_leaderboard, Justinrune/LLaMA-Factory, K00B404/LLM_Quantization, KBaba7/Quant, LanguageBind/MoE-LLaVA, bhaskartripathi/LLM_Quantization, blackwingedkite/gutalk, eduagarcia/open_pt_llm_leaderboard, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen-7B%5D(%2FQwen%2FQwen-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, officialhimanshu595/llama-factory",13
279
+ rinna/nekomata-7b,"---
280
+ thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
281
+ datasets:
282
+ - mc4
283
+ - wikipedia
284
+ - EleutherAI/pile
285
+ - oscar-corpus/colossal-oscar-1.0
286
+ - cc100
287
+ language:
288
+ - ja
289
+ - en
290
+ tags:
291
+ - qwen
292
+ inference: false
293
+ license: other
294
+ license_name: tongyi-qianwen-license-agreement
295
+ license_link: >-
296
+ https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
297
+ base_model: Qwen/Qwen-7B
298
+ ---
299
+
300
+ # `rinna/nekomata-7b`
301
+
302
+ ![rinna-icon](./rinna.png)
303
+
304
+ # Overview
305
+ We conduct continual pre-training of [qwen-7b](https://huggingface.co/Qwen/Qwen-7B) on **30B** tokens from a mixture of Japanese and English datasets. The continual pre-training significantly improves the model's performance on Japanese tasks. It also enjoys the following great features provided by the original Qwen model.
306
+ * The inclusive Qwen vocabulary (vocab size > 150k) enables the model to processs Japanese texts much more efficiently than the previously released [youri series](https://huggingface.co/collections/rinna/youri-7b-654053610cb8e9d8e6289efc).
307
+ * The model supports a maximum sequence length of 32768.
308
+
309
+ The name `nekomata` comes from the Japanese word [`猫又/ねこまた/Nekomata`](https://ja.wikipedia.org/wiki/%E7%8C%AB%E5%8F%88), which is a kind of Japanese mythical creature ([`妖怪/ようかい/Youkai`](https://ja.wikipedia.org/wiki/%E5%A6%96%E6%80%AA)).
310
+
311
+
312
+ * **Library**
313
+
314
+ The model was trained using code based on [EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox).
315
+
316
+ * **Model architecture**
317
+
318
+ A 32-layer, 4096-hidden-size transformer-based language model. Please refer to the [Qwen paper](https://arxiv.org/abs/2309.16609) for architecture details.
319
+
320
+ * **Continual pre-training**
321
+
322
+ The model was initialized with the [qwen-7b](https://huggingface.co/Qwen/Qwen-7B) model and continually trained on around **30B** tokens from a mixture of the following corpora
323
+ - [Japanese CC-100](http://data.statmt.org/cc-100/ja.txt.xz)
324
+ - [Japanese C4](https://huggingface.co/datasets/mc4)
325
+ - [Japanese OSCAR](https://huggingface.co/datasets/oscar-corpus/colossal-oscar-1.0)
326
+ - [The Pile](https://huggingface.co/datasets/EleutherAI/pile)
327
+ - [Wikipedia](https://dumps.wikimedia.org/other/cirrussearch)
328
+ - rinna curated Japanese dataset
329
+
330
+ * **Contributors**
331
+
332
+ - [Tianyu Zhao](https://huggingface.co/tianyuz)
333
+ - [Akio Kaga](https://huggingface.co/rakaga)
334
+ - [Kei Sawada](https://huggingface.co/keisawada)
335
+
336
+ * **Release date**
337
+
338
+ December 21, 2023
339
+
340
+ ---
341
+
342
+ # Benchmarking
343
+ Please refer to [rinna's LM benchmark page (Sheet 20231221)](https://rinnakk.github.io/research/benchmarks/lm/index.html).
344
+
345
+ ---
346
+
347
+ # How to use the model
348
+
349
+ ~~~~python
350
+ import torch
351
+ from transformers import AutoTokenizer, AutoModelForCausalLM
352
+
353
+ tokenizer = AutoTokenizer.from_pretrained(""rinna/nekomata-7b"", trust_remote_code=True)
354
+
355
+ # Use GPU with bf16
356
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b"", device_map=""auto"", trust_remote_code=True, bf16=True)
357
+
358
+ # Use GPU with fp16
359
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b"", device_map=""auto"", trust_remote_code=True, fp16=True)
360
+
361
+ # Use CPU
362
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b"", device_map=""cpu"", trust_remote_code=True)
363
+
364
+ # Automatically select device and precision
365
+ model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b"", device_map=""auto"", trust_remote_code=True)
366
+
367
+ text = ""西田幾多郎は、""
368
+ token_ids = tokenizer.encode(text, add_special_tokens=False, return_tensors=""pt"")
369
+
370
+ with torch.no_grad():
371
+ output_ids = model.generate(
372
+ token_ids.to(model.device),
373
+ max_new_tokens=200,
374
+ min_new_tokens=200,
375
+ do_sample=True,
376
+ temperature=1.0,
377
+ top_p=0.95,
378
+ pad_token_id=tokenizer.pad_token_id,
379
+ bos_token_id=tokenizer.bos_token_id,
380
+ eos_token_id=tokenizer.eos_token_id
381
+ )
382
+
383
+ output = tokenizer.decode(output_ids.tolist()[0])
384
+ print(output)
385
+ ~~~~
386
+
387
+ ---
388
+
389
+ # Tokenization
390
+ The model uses the original Qwen tokenizer. It augments the [`cl100k` tiktoken tokenizer](https://github.com/openai/tiktoken) and has a vocabulary size of 151,936. The inclusive vocabulary helps the model to reach a better tokenization efficiency, especially for Japanese texts.
391
+
392
+ We compared the `Qwen` tokenizer (as used in `nekomata`) and the `llama-2` tokenizer (as used in `youri`) on different text collections and found that the Qwen tokenizer achieves a much better byte2token rate (i.e. the average number of tokens produced from 1 byte of text) as following. A lower byte2token rate indicates a better tokenization efficiency.
393
+
394
+
395
+ | Tokenizer | Japanese | English | Multilingual |
396
+ | --- | --- | --- | --- |
397
+ | Qwen | 0.24 | 0.27 | 0.27 |
398
+ | llama-2 | 0.40 | 0.29 | 0.36 |
399
+
400
+ ---
401
+
402
+ # How to cite
403
+ ```bibtex
404
+ @misc{rinna-nekomata-7b,
405
+ title = {rinna/nekomata-7b},
406
+ author = {Zhao, Tianyu and Kaga, Akio and Sawada, Kei},
407
+ url = {https://huggingface.co/rinna/nekomata-7b}
408
+ }
409
+
410
+ @inproceedings{sawada2024release,
411
+ title = {Release of Pre-Trained Models for the {J}apanese Language},
412
+ author = {Sawada, Kei and Zhao, Tianyu and Shing, Makoto and Mitsui, Kentaro and Kaga, Akio and Hono, Yukiya and Wakatsuki, Toshiaki and Mitsuda, Koh},
413
+ booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
414
+ month = {5},
415
+ year = {2024},
416
+ pages = {13898--13905},
417
+ url = {https://aclanthology.org/2024.lrec-main.1213},
418
+ note = {\url{https://arxiv.org/abs/2404.01657}}
419
+ }
420
+ ```
421
+ ---
422
+
423
+ # References
424
+ ```bibtex
425
+ @software{gpt-neox-library,
426
+ title = {{GPT}-{N}eo{X}: Large Scale Autoregressive Language Modeling in {P}y{T}orch},
427
+ author = {Andonian, Alex and Anthony, Quentin and Biderman, Stella and Black, Sid and Gali, Preetham and Gao, Leo and Hallahan, Eric and Levy-Kramer, Josh and Leahy, Connor and Nestler, Lucas and Parker, Kip and Pieler, Michael and Purohit, Shivanshu and Songz, Tri and Phil, Wang and Weinbach, Samuel},
428
+ doi = {10.5281/zenodo.5879544},
429
+ month = {8},
430
+ year = {2021},
431
+ version = {0.0.1},
432
+ url = {https://www.github.com/eleutherai/gpt-neox}
433
+ }
434
+ ```
435
+ ---
436
+
437
+ # License
438
+ [Tongyi Qianwen LICENSE AGREEMENT](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)","{""id"": ""rinna/nekomata-7b"", ""author"": ""rinna"", ""sha"": ""51aac119c45eaf386923e945e7bd4b6d1d816d2f"", ""last_modified"": ""2025-03-23 11:29:21+00:00"", ""created_at"": ""2023-12-19 06:58:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 712, ""downloads_all_time"": null, ""likes"": 7, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""ja"", ""en"", ""dataset:mc4"", ""dataset:wikipedia"", ""dataset:EleutherAI/pile"", ""dataset:oscar-corpus/colossal-oscar-1.0"", ""dataset:cc100"", ""arxiv:2309.16609"", ""arxiv:2404.01657"", ""base_model:Qwen/Qwen-7B"", ""base_model:finetune:Qwen/Qwen-7B"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-7B\ndatasets:\n- mc4\n- wikipedia\n- EleutherAI/pile\n- oscar-corpus/colossal-oscar-1.0\n- cc100\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|extra_204|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": 
""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rinna.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Sakalti/nekomata""], ""safetensors"": {""parameters"": 
{""BF16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-23 11:29:21+00:00"", ""cardData"": ""base_model: Qwen/Qwen-7B\ndatasets:\n- mc4\n- wikipedia\n- EleutherAI/pile\n- oscar-corpus/colossal-oscar-1.0\n- cc100\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""65813f24035c028f33418bbb"", ""modelId"": ""rinna/nekomata-7b"", ""usedStorage"": 30885412891}",1,https://huggingface.co/rinna/nekomata-7b-instruction,1,,0,"https://huggingface.co/rinna/nekomata-7b-gguf, https://huggingface.co/mradermacher/nekomata-7b-GGUF, https://huggingface.co/mradermacher/nekomata-7b-i1-GGUF",3,,0,"Sakalti/nekomata, huggingface/InferenceSupport/discussions/new?title=rinna/nekomata-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brinna%2Fnekomata-7b%5D(%2Frinna%2Fnekomata-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
439
+ rinna/nekomata-7b-instruction,"---
440
+ thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
441
+ datasets:
442
+ - databricks/databricks-dolly-15k
443
+ - kunishou/databricks-dolly-15k-ja
444
+ - izumi-lab/llm-japanese-dataset
445
+ language:
446
+ - ja
447
+ - en
448
+ tags:
449
+ - qwen
450
+ inference: false
451
+ license: other
452
+ license_name: tongyi-qianwen-license-agreement
453
+ license_link: >-
454
+ https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
455
+ base_model: rinna/nekomata-7b
456
+ ---
457
+
458
+ # `rinna/nekomata-7b-instruction`
459
+
460
+ ![rinna-icon](./rinna.png)
461
+
462
+ # Overview
463
+ The model is the instruction-tuned version of [`rinna/nekomata-7b`](https://huggingface.co/rinna/nekomata-7b). It adopts the Alpaca input format.
464
+
465
+ * **Model architecture**
466
+
467
+ A 32-layer, 4096-hidden-size transformer-based language model. Please refer to the [Qwen paper](https://arxiv.org/abs/2309.16609) for architecture details.
468
+
469
+ * **Fine-tuning**
470
+
471
+ The fine-tuning data is the subset of the following datasets.
472
+ * [Databricks Dolly data](https://huggingface.co/datasets/databricks/databricks-dolly-15k)
473
+ * [Japanese Databricks Dolly data](https://huggingface.co/datasets/kunishou/databricks-dolly-15k-ja)
474
+ * [FLAN Instruction Tuning data](https://github.com/google-research/FLAN) and its Japanese translation
475
+ * [Izumi lab LLM Japanese dataset](https://github.com/masanorihirano/llm-japanese-dataset/tree/main)
476
+ * The following sections are used
477
+ * alt
478
+ * aozora-txt
479
+ * CourseraParallel
480
+ * ParaNatCom
481
+ * Tab-delimited_Bilingual_Sentence_Pairs
482
+ * tanaka-corpus
483
+ * wikinews
484
+ * wordnet
485
+ * yasashi-japanese
486
+ * The [remaining sections](https://github.com/masanorihirano/llm-japanese-dataset/tree/main/datasets-cc-by-sa) contain commonly used evaluation corpora so they are skipped to prevent data leak.
487
+
488
+ * **Contributors**
489
+
490
+ - [Tianyu Zhao](https://huggingface.co/tianyuz)
491
+ - [Kei Sawada](https://huggingface.co/keisawada)
492
+
493
+ * **Release date**
494
+
495
+ December 21, 2023
496
+
497
+ ---
498
+
499
+ # Benchmarking
500
+ Please refer to [rinna's LM benchmark page (Sheet 20231221)](https://rinnakk.github.io/research/benchmarks/lm/index.html).
501
+
502
+ ---
503
+
504
+ # How to use the model
505
+
506
+ ~~~~python
507
+ import torch
508
+ from transformers import AutoTokenizer, AutoModelForCausalLM
509
+
510
+ tokenizer = AutoTokenizer.from_pretrained(""rinna/nekomata-7b-instruction"", trust_remote_code=True)
511
+
512
+ # Use GPU with bf16
513
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b-instruction"", device_map=""auto"", trust_remote_code=True, bf16=True)
514
+
515
+ # Use GPU with fp16
516
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b-instruction"", device_map=""auto"", trust_remote_code=True, fp16=True)
517
+
518
+ # Use CPU
519
+ # model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b-instruction"", device_map=""cpu"", trust_remote_code=True)
520
+
521
+ # Automatically select device and precision
522
+ model = AutoModelForCausalLM.from_pretrained(""rinna/nekomata-7b-instruction"", device_map=""auto"", trust_remote_code=True)
523
+
524
+ instruction = ""次の日本語を英語に翻訳してください。""
525
+ input = ""大規模言語モデル(だいきぼげんごモデル、英: large language model、LLM)は、多数のパラメータ(数千万から数十億)を持つ人工ニューラルネットワークで構成されるコンピュータ言語モデルで、膨大なラベルなしテキストを使用して自己教師あり学習または半教師あり学習によって訓練が行われる。""
526
+ prompt = f""""""
527
+ 以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。
528
+
529
+ ### 指示:
530
+ {instruction}
531
+
532
+ ### 入力:
533
+ {input}
534
+
535
+ ### 応答:
536
+ """"""
537
+ token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors=""pt"")
538
+
539
+ with torch.no_grad():
540
+ output_ids = model.generate(
541
+ token_ids.to(model.device),
542
+ max_new_tokens=200,
543
+ do_sample=True,
544
+ temperature=0.5,
545
+ pad_token_id=tokenizer.pad_token_id,
546
+ bos_token_id=tokenizer.bos_token_id,
547
+ eos_token_id=tokenizer.eos_token_id
548
+ )
549
+
550
+ output = tokenizer.decode(output_ids.tolist()[0])
551
+ print(output)
552
+ """"""
553
+ 以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。
554
+
555
+ ### 指示:
556
+ 次の日本語を英語に翻訳してください。
557
+
558
+ ### 入力:
559
+ 大規模言語モデル(だいきぼげんごモデル、英: large language model、LLM)は、多数のパラメータ(数千万から数十億)を持つ人工ニューラルネットワークで構成されるコンピュータ言語モデルで、膨大なラベルなしテキストを使 用して自己教師あり学習または半教師あり学習によって訓練が行われる。
560
+
561
+ ### 応答:
562
+ A large language model (LLM) is a computer language model composed of artificial neural networks with many parameters (from tens of millions to billions) trained by self-supervised learning or semi-supervised learning using a large amount of unlabeled text.<|endoftext|>
563
+ """"""
564
+ ~~~~
565
+
566
+ ---
567
+
568
+ # Tokenization
569
+ Please refer to [`rinna/nekomata-7b`](https://huggingface.co/rinna/nekomata-7b) for tokenization details.
570
+
571
+ ---
572
+
573
+ # How to cite
574
+ ```bibtex
575
+ @misc{rinna-nekomata-7b-instruction,
576
+ title = {rinna/nekomata-7b-instruction},
577
+ author = {Zhao, Tianyu and Sawada, Kei},
578
+ url = {https://huggingface.co/rinna/nekomata-7b-instruction}
579
+ }
580
+
581
+ @inproceedings{sawada2024release,
582
+ title = {Release of Pre-Trained Models for the {J}apanese Language},
583
+ author = {Sawada, Kei and Zhao, Tianyu and Shing, Makoto and Mitsui, Kentaro and Kaga, Akio and Hono, Yukiya and Wakatsuki, Toshiaki and Mitsuda, Koh},
584
+ booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
585
+ month = {5},
586
+ year = {2024},
587
+ pages = {13898--13905},
588
+ url = {https://aclanthology.org/2024.lrec-main.1213},
589
+ note = {\url{https://arxiv.org/abs/2404.01657}}
590
+ }
591
+ ```
592
+ ---
593
+
594
+ # License
595
+ [Tongyi Qianwen LICENSE AGREEMENT](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)","{""id"": ""rinna/nekomata-7b-instruction"", ""author"": ""rinna"", ""sha"": ""97eb98107ee62d9c4fc0a0e8df937cb2f06e2922"", ""last_modified"": ""2025-03-23 11:28:38+00:00"", ""created_at"": ""2023-12-19 08:10:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4181, ""downloads_all_time"": null, ""likes"": 10, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""ja"", ""en"", ""dataset:databricks/databricks-dolly-15k"", ""dataset:kunishou/databricks-dolly-15k-ja"", ""dataset:izumi-lab/llm-japanese-dataset"", ""arxiv:2309.16609"", ""arxiv:2404.01657"", ""base_model:rinna/nekomata-7b"", ""base_model:finetune:rinna/nekomata-7b"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: rinna/nekomata-7b\ndatasets:\n- databricks/databricks-dolly-15k\n- kunishou/databricks-dolly-15k-ja\n- izumi-lab/llm-japanese-dataset\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|extra_204|>""}}, ""transformers_info"": {""auto_model"": 
""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rinna.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-23 11:28:38+00:00"", ""cardData"": ""base_model: rinna/nekomata-7b\ndatasets:\n- databricks/databricks-dolly-15k\n- kunishou/databricks-dolly-15k-ja\n- izumi-lab/llm-japanese-dataset\nlanguage:\n- ja\n- en\nlicense: other\nlicense_name: 
tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\ntags:\n- qwen\nthumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""65814fdf4daa78a2f284958c"", ""modelId"": ""rinna/nekomata-7b-instruction"", ""usedStorage"": 30885360243}",2,,0,,0,"https://huggingface.co/rinna/nekomata-7b-instruction-gguf, https://huggingface.co/mradermacher/nekomata-7b-instruction-GGUF, https://huggingface.co/mradermacher/nekomata-7b-instruction-i1-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=rinna/nekomata-7b-instruction&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brinna%2Fnekomata-7b-instruction%5D(%2Frinna%2Fnekomata-7b-instruction)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
596
+ Locutusque/UltraQwen-7B,"---
597
+ datasets:
598
+ - HuggingFaceH4/ultrachat_200k
599
+ language:
600
+ - en
601
+ license: other
602
+ base_model: Qwen/Qwen-7B
603
+ ---
604
+ # Model description
605
+ The model was trained on about 100,000 examples of the HuggingFaceH4/ultrachat_200k dataset, with plans to release more checkpoints later on.
606
+
607
+ This model has not been aligned with DPO. In the future, different repositories will be released that contain versions of this model aligned with DPO, using various datasets.
608
+
609
+ # Evaluation
610
+ Upon personal testing, the model demonstrates excellent performance in mathematics, history, trivia, and coding tasks. This model can be found on the Open LLM Leaderboard.
611
+
612
+ # Recommended inference parameters
613
+
614
+ temperature=0.2, top_p=0.14, top_k=12, repetition_penalty=1.1
615
+
616
+ # License
617
+
618
+ Please make sure to read the Qwen licensing agreement before using this model.","{""id"": ""Locutusque/UltraQwen-7B"", ""author"": ""Locutusque"", ""sha"": ""0c029182ee4f8d7ee5df1e2ec3e1f2a726d11394"", ""last_modified"": ""2024-01-21 17:57:01+00:00"", ""created_at"": ""2024-01-21 01:54:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""conversational"", ""en"", ""dataset:HuggingFaceH4/ultrachat_200k"", ""base_model:Qwen/Qwen-7B"", ""base_model:finetune:Qwen/Qwen-7B"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-7B\ndatasets:\n- HuggingFaceH4/ultrachat_200k\nlanguage:\n- en\nlicense: other"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7721455616}, ""total"": 7721455616}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-21 17:57:01+00:00"", ""cardData"": ""base_model: Qwen/Qwen-7B\ndatasets:\n- HuggingFaceH4/ultrachat_200k\nlanguage:\n- en\nlicense: other"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65ac795d0844d9e0d680f60a"", ""modelId"": 
""Locutusque/UltraQwen-7B"", ""usedStorage"": 15442959104}",1,,0,,0,https://huggingface.co/MaziyarPanahi/UltraQwen-7B-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=Locutusque/UltraQwen-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BLocutusque%2FUltraQwen-7B%5D(%2FLocutusque%2FUltraQwen-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
619
+ mohit95559/mymodel,"---
620
+ license: other
621
+ base_model: Qwen/Qwen-7B
622
+ tags:
623
+ - llama-factory
624
+ - freeze
625
+ - generated_from_trainer
626
+ model-index:
627
+ - name: cpt
628
+ results: []
629
+ ---
630
+
631
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
632
+ should probably proofread and complete it, then remove this comment. -->
633
+
634
+ [<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""200"" height=""32""/>](https://wandb.ai/vision-x/huggingface/runs/lugcpcak)
635
+ # cpt
636
+
637
+ This model is a fine-tuned version of [Qwen/Qwen-7B](https://huggingface.co/Qwen/Qwen-7B) on the fineweb dataset.
638
+ It achieves the following results on the evaluation set:
639
+ - Loss: 2.4038
640
+
641
+ ## Model description
642
+
643
+ More information needed
644
+
645
+ ## Intended uses & limitations
646
+
647
+ More information needed
648
+
649
+ ## Training and evaluation data
650
+
651
+ More information needed
652
+
653
+ ## Training procedure
654
+
655
+ ### Training hyperparameters
656
+
657
+ The following hyperparameters were used during training:
658
+ - learning_rate: 0.0001
659
+ - train_batch_size: 120
660
+ - eval_batch_size: 12
661
+ - seed: 42
662
+ - distributed_type: multi-GPU
663
+ - num_devices: 8
664
+ - total_train_batch_size: 960
665
+ - total_eval_batch_size: 96
666
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
667
+ - lr_scheduler_type: cosine
668
+ - lr_scheduler_warmup_ratio: 0.1
669
+ - num_epochs: 2.0
670
+
671
+ ### Training results
672
+
673
+ | Training Loss | Epoch | Step | Validation Loss |
674
+ |:-------------:|:------:|:----:|:---------------:|
675
+ | 2.1109 | 1.9157 | 500 | 2.4038 |
676
+
677
+
678
+ ### Framework versions
679
+
680
+ - Transformers 4.42.3
681
+ - Pytorch 2.1.2+git70dfd51
682
+ - Datasets 2.20.0
683
+ - Tokenizers 0.19.1
684
+ ","{""id"": ""mohit95559/mymodel"", ""author"": ""mohit95559"", ""sha"": ""4ad577ca9dc938d97a43b1ef28ec3e3046964f51"", ""last_modified"": ""2024-07-09 09:24:53+00:00"", ""created_at"": ""2024-07-09 08:31:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""feature-extraction"", ""llama-factory"", ""freeze"", ""generated_from_trainer"", ""custom_code"", ""base_model:Qwen/Qwen-7B"", ""base_model:finetune:Qwen/Qwen-7B"", ""license:other"", ""region:us""], ""pipeline_tag"": ""feature-extraction"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-7B\nlicense: other\ntags:\n- llama-factory\n- freeze\n- generated_from_trainer\nmodel-index:\n- name: cpt\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""cpt"", ""results"": []}], ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModel"": ""modeling_qwen.QWenLMHeadModel"", ""AutoModelForCausalLM"": ""Qwen/Qwen-7B--modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {""chat_template"": ""{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content }}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='global_step500/zero_pp_rank_4_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_5_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_6_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='global_step500/zero_pp_rank_7_mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_0.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_1.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_3.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_4.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_5.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_6.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_7.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler.pt', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='zero_to_fp32.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7721324544}, ""total"": 7721324544}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-09 09:24:53+00:00"", ""cardData"": ""base_model: Qwen/Qwen-7B\nlicense: other\ntags:\n- llama-factory\n- freeze\n- generated_from_trainer\nmodel-index:\n- name: cpt\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""668cf56ede202e393cbd9c79"", ""modelId"": ""mohit95559/mymodel"", ""usedStorage"": 79461637720}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mohit95559/mymodel&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmohit95559%2Fmymodel%5D(%2Fmohit95559%2Fmymodel)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Sky-T1-32B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv ADDED
@@ -0,0 +1,714 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ NovaSky-AI/Sky-T1-32B-Preview,"---
3
+ library_name: transformers
4
+ datasets:
5
+ - codeparrot/apps
6
+ - BAAI/TACO
7
+ - AI-MO/NuminaMath-CoT
8
+ language:
9
+ - en
10
+ base_model:
11
+ - Qwen/Qwen2.5-32B-Instruct
12
+ license: apache-2.0
13
+ ---
14
+
15
+ ## Model Details
16
+
17
+ ### Model Description
18
+
19
+ <!-- Provide a longer summary of what this model is. -->
20
+
21
+ This is a 32B reasoning model trained from Qwen2.5-32B-Instruct with 17K data. The performance is on par with o1-preview model on both math and coding.
22
+ Please see our [blog post](https://novasky-ai.github.io/posts/sky-t1/) for more details.
23
+
24
+ - **Developed by:** NovaSky Team from Sky Computing Lab at UC Berkeley.
25
+
26
+ ## Training Details
27
+
28
+ ### Training Data
29
+
30
+ 17K verified correct responses from Qwen/QwQ-32B-Preview on coding, math. In addition, we add the science portion from the [Still-2 paper](https://arxiv.org/pdf/2412.09413).
31
+
32
+ ### Training Procedure
33
+ We perform supervised fine tuning on the data, with a batch size of 96.
34
+
35
+ #### Speeds
36
+
37
+ We use Llama-Factory for training. On 8 H100, the training takes 19 hours with DeepSpeed Zero-3 Offload.
38
+
39
+
40
+ ## Evaluation
41
+ | | Sky-T1-32B-Preview | Qwen-2.5-32B-Instruct | QwQ | o1-preview |
42
+ |-----------------------|---------------------|--------|-------|------------|
43
+ | Math500 | 82.4 | 76.2 | 85.4 | 81.4 |
44
+ | AIME2024 | 43.3 | 16.7 | 50.0 | 40.0 |
45
+ | LiveCodeBench-Easy | 86.3 | 84.6 | 90.7 | 92.9 |
46
+ | LiveCodeBench-Medium | 56.8 | 40.8 | 56.3 | 54.9 |
47
+ | LiveCodeBench-Hard | 17.9 | 9.8 | 17.1 | 16.3 |
48
+ | GPQA-Diamond | 56.8 | 45.5 | 52.5 | 75.2 |
49
+
50
+ ## Acknowledgement
51
+ We would like to thanks the compute resources from [Lambda Lab](https://lambdalabs.com/service/gpu-cloud?srsltid=AfmBOop5FnmEFTkavVtdZDsLWvHWNg6peXtat-OXJ9MW5GMNsk756PE5) and [AnyScale](https://www.anyscale.com/). We would like to thanks the academic feedback and support from the [Still-2 Team](https://arxiv.org/pdf/2412.09413), and [Junyang Lin](https://justinlin610.github.io/) from the [Qwen Team](https://qwenlm.github.io/).
52
+
53
+ ## Citation
54
+ Please considering citing our blog post if you found it useful for your research. Thank you!
55
+
56
+ ```bibtex
57
+ @misc{sky_t1_2025,
58
+ author = {NovaSky Team},
59
+ title = {Sky-T1: Fully open-source reasoning model with o1-preview performance in $450 budget},
60
+ howpublished = {https://novasky-ai.github.io/posts/sky-t1},
61
+ note = {Accessed: 2025-01-09},
62
+ year = {2025}
63
+ }","{""id"": ""NovaSky-AI/Sky-T1-32B-Preview"", ""author"": ""NovaSky-AI"", ""sha"": ""1e3f4c62a30c7ce70f4b3a3b952895d866943551"", ""last_modified"": ""2025-01-13 08:10:51+00:00"", ""created_at"": ""2025-01-07 22:38:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 16462, ""downloads_all_time"": null, ""likes"": 543, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:codeparrot/apps"", ""dataset:BAAI/TACO"", ""dataset:AI-MO/NuminaMath-CoT"", ""arxiv:2412.09413"", ""base_model:Qwen/Qwen2.5-32B-Instruct"", ""base_model:finetune:Qwen/Qwen2.5-32B-Instruct"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2.5-32B-Instruct\ndatasets:\n- codeparrot/apps\n- BAAI/TACO\n- AI-MO/NuminaMath-CoT\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""Wicky/NovaSky-AI-Sky-T1-32B-Preview"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""ceifeDeFi/NovaSky-AI-Sky-T1-32B-Preview"", ""unveilingthetruth/NovaSky-AI-Sky-T1-32B-Preview"", ""ancoai/NovaSky-AI-Sky-T1-32B-Preview"", ""okhosting/NovaSky-AI-Sky-T1-32B-Preview"", ""baranbayden/NovaSky-AI-Sky-T1-32B-Preview"", ""Diamonddog3232/NovaSkytesting"", ""djfj3/NovaSky-AI-Sky-T1-32B-Preview"", ""diffusers03890000000/NovaSky-AI-Sky-T1-32B-Preview"", ""yuri-val/NovaSky-AI-Sky-T1-32B-Preview"", ""vihaan43/NovaSky-AI-Sky-T1-32B-Preview"", ""Vlean/NovaSky-AI-Sky-T1-32B-Preview"", ""kbam77/NovaSky-AI-Sky-T1-32B-Preview"", ""AniruthA/NovaSky-AI-Sky-T1-32B-Preview"", ""bm0x/NovaSky-AI-Sky-T1-32B-Preview"", ""FiendFlb/NovaSky-AI-Sky-T1-32B-Preview"", ""ErnestoOjeda/NovaSky-AI-Sky-T1-32B-Preview"", ""baharturkel/NovaSky-AI-Sky-T1-32B-Preview"", ""khan860460/NovaSky-AI-Sky-T1-32B-Preview"", ""tryfinally/NovaSky-AI-Sky-T1-32B-Preview"", ""faizanali/NovaSky-AI-Sky-T1-32B-Preview"", ""alperall/NovaSky-AI-Sky-T1-32B-Preview"", ""gdberrio/NovaSky-AI-Sky-T1-32B-Preview"", ""PyScoutAI/PyscoutAI"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2025-01-13 08:10:51+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2.5-32B-Instruct\ndatasets:\n- codeparrot/apps\n- BAAI/TACO\n- AI-MO/NuminaMath-CoT\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""677dacec916db85347228a24"", ""modelId"": ""NovaSky-AI/Sky-T1-32B-Preview"", ""usedStorage"": 65527840896}",0,"https://huggingface.co/huihui-ai/Sky-T1-32B-Preview-abliterated, https://huggingface.co/NovaSky-AI/Sky-T1-32B-Flash, https://huggingface.co/mlx-community/Sky-T1-32B-Preview, https://huggingface.co/prithivMLmods/Omni-Reasoner-o1, https://huggingface.co/alitominerba/dsgfsdfg, https://huggingface.co/DenGreenGren/GrandMobile, https://huggingface.co/MSHugg/STWNow",7,https://huggingface.co/shakedzy/Sky-T1-32B-Steps,1,"https://huggingface.co/mradermacher/Sky-T1-32B-Preview-GGUF, https://huggingface.co/jth01/Sky-T1-32B-Preview-4.25-bpw, https://huggingface.co/bartowski/Sky-T1-32B-Preview-GGUF, https://huggingface.co/DevQuasar/NovaSky-AI.Sky-T1-32B-Preview-GGUF, https://huggingface.co/tensorblock/Sky-T1-32B-Preview-GGUF, https://huggingface.co/second-state/Sky-T1-32B-Preview-GGUF, https://huggingface.co/gaianet/Sky-T1-32B-Preview-GGUF, https://huggingface.co/mradermacher/Sky-T1-32B-Preview-i1-GGUF, https://huggingface.co/mlx-community/Sky-T1-32B-Preview-8bit, https://huggingface.co/cnfusion/Sky-T1-32B-Preview-Q4-mlx, https://huggingface.co/cnfusion/Sky-T1-32B-Preview-Q3-mlx, https://huggingface.co/mlx-community/Sky-T1-32B-Preview-4bit, https://huggingface.co/AIFunOver/Sky-T1-32B-Preview-openvino-8bit, https://huggingface.co/AIFunOver/Sky-T1-32B-Preview-openvino-4bit, https://huggingface.co/cnfusion/Sky-T1-32B-Preview-Q6-mlx, https://huggingface.co/onekq-ai/Sky-T1-32B-Preview-bnb-4bit, 
https://huggingface.co/pr0j3ct-m1ndb0t-2045/M1NDB0T-SkyN0VA-32-GGUF, https://huggingface.co/Valdemardi/Sky-T1-32B-Preview-AWQ, https://huggingface.co/jesusoctavioas/Sky-T1-32B-Preview-MLX-Q4, https://huggingface.co/featherless-ai-quants/NovaSky-AI-Sky-T1-32B-Preview-GGUF, https://huggingface.co/Scotto2025/Sky-T1-32B-Preview-Q4-mlx",21,"https://huggingface.co/Triangle104/DeepSky-T100, https://huggingface.co/TeetouchQQ/model_mergev2, https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1, https://huggingface.co/hotmailuser/QwenSlerp2-32B, https://huggingface.co/hotmailuser/QwenSlerp3-32B, https://huggingface.co/Sorawiz/KunouSky-32B, https://huggingface.co/ltn18/QwQ-SkyT1-32B-Preview, https://huggingface.co/radna/S1.1-Deepseek-R1-T1-32B, https://huggingface.co/radna/S1.1-Deepseek-R1-T1-QWQ-32B-Preview, https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1.618, https://huggingface.co/marcuscedricridia/Springer-32B-18",11,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, Wicky/NovaSky-AI-Sky-T1-32B-Preview, ancoai/NovaSky-AI-Sky-T1-32B-Preview, baranbayden/NovaSky-AI-Sky-T1-32B-Preview, bhaskartripathi/LLM_Quantization, ceifeDeFi/NovaSky-AI-Sky-T1-32B-Preview, huggingface/InferenceSupport/discussions/new?title=NovaSky-AI/Sky-T1-32B-Preview&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNovaSky-AI%2FSky-T1-32B-Preview%5D(%2FNovaSky-AI%2FSky-T1-32B-Preview)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, okhosting/NovaSky-AI-Sky-T1-32B-Preview, ruslanmv/convert_to_gguf, totolook/Quant, unveilingthetruth/NovaSky-AI-Sky-T1-32B-Preview",13
64
+ huihui-ai/Sky-T1-32B-Preview-abliterated,"---
65
+ library_name: transformers
66
+ language:
67
+ - en
68
+ base_model:
69
+ - NovaSky-AI/Sky-T1-32B-Preview
70
+ tags:
71
+ - abliterated
72
+ - uncensored
73
+ ---
74
+ # huihui-ai/Sky-T1-32B-Preview-abliterated
75
+
76
+ This is an uncensored version of [NovaSky-AI/Sky-T1-32B-Preview](https://huggingface.co/NovaSky-AI/Sky-T1-32B-Preview) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to know more about it).
77
+ This is a crude, proof-of-concept implementation to remove refusals from an LLM model without using TransformerLens.
78
+
79
+ ## Use with ollama
80
+
81
+ You can use [huihui_ai/skyt1-abliterated](https://ollama.com/huihui_ai/skyt1-abliterated) directly
82
+ ```
83
+ ollama run huihui_ai/skyt1-abliterated
84
+ ```","{""id"": ""huihui-ai/Sky-T1-32B-Preview-abliterated"", ""author"": ""huihui-ai"", ""sha"": ""8485a6baa85d412a60ac85c3aa35514057013eda"", ""last_modified"": ""2025-01-19 20:00:29+00:00"", ""created_at"": ""2025-01-19 13:28:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""abliterated"", ""uncensored"", ""conversational"", ""en"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\nlanguage:\n- en\nlibrary_name: transformers\ntags:\n- abliterated\n- uncensored"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-19 20:00:29+00:00"", ""cardData"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\nlanguage:\n- en\nlibrary_name: transformers\ntags:\n- abliterated\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""678cfdfdb9303fc391dbef35"", ""modelId"": ""huihui-ai/Sky-T1-32B-Preview-abliterated"", ""usedStorage"": 65539263578}",1,,0,,0,"https://huggingface.co/mradermacher/Sky-T1-32B-Preview-abliterated-GGUF, https://huggingface.co/mradermacher/Sky-T1-32B-Preview-abliterated-i1-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q3_K_S-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q3_K_M-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q3_K_L-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q4_K_S-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q4_K_M-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q5_K_S-GGUF, 
https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q5_K_M-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q6_K-GGUF, https://huggingface.co/Triangle104/Sky-T1-32B-Preview-abliterated-Q8_0-GGUF",11,"https://huggingface.co/Triangle104/DeepSky-T100, https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1, https://huggingface.co/Triangle104/Rhombos-Skyloft-32b, https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1.618",4,huggingface/InferenceSupport/discussions/new?title=huihui-ai/Sky-T1-32B-Preview-abliterated&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhuihui-ai%2FSky-T1-32B-Preview-abliterated%5D(%2Fhuihui-ai%2FSky-T1-32B-Preview-abliterated)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
85
+ NovaSky-AI/Sky-T1-32B-Flash,"---
86
+ library_name: transformers
87
+ datasets:
88
+ - BAAI/TACO
89
+ - tasksource/PRM800K
90
+ language:
91
+ - en
92
+ base_model:
93
+ - Qwen/Qwen2.5-32B-Instruct
94
+ - NovaSky-AI/Sky-T1-32B-Preview
95
+ license: apache-2.0
96
+ ---
97
+
98
+ ## Model Details
99
+
100
+ ### Model Description
101
+
102
+ <!-- Provide a longer summary of what this model is. -->
103
+
104
+ This is a 32B reasoning model preference optimized on top of Sky-T1-32B-Preview to significantly reduce generation lengths while maintaining accuracy. The performance is on par with o1-preview model in both math and coding, while reducing generation lengths by up to 57% relative to Sky-T1-32B-Preview.
105
+ Please see our [blog post](https://novasky-ai.github.io/posts/reduce-overthinking/) for more details.
106
+
107
+ - **Developed by:** NovaSky Team from Sky Computing Lab at UC Berkeley.
108
+
109
+ ## Training Details
110
+
111
+ ### Training Data
112
+
113
+ 10K preference pairs in math and coding domains, generated by Sky-T1-32B-Preview.
114
+
115
+ ### Training Procedure
116
+ We perform Simple Policy Optimization (SimPO) with a batch size of 96, learning rate of 5e-7, gamma of 0.3, and beta of 2.0.
117
+
118
+ #### Speeds
119
+
120
+ We use Llama-Factory for training. On 8xH100, the SimPO training takes ~2.5 hours with DeepSpeed Zero-3 Offload.
121
+
122
+
123
+ ## Evaluation
124
+ | | | Sky-T1-32B-Preview | Sky-T1-32B-Flash | Qwen2.5-32B-Instruct | QwQ-32B- Base | DeepSeek-R1-Distill-Qwen-32B |
125
+ |--------------|---------|:------------------:|:----------------:|:--------------------:|:-------------:|:----------------------------:|
126
+ | Math500 | Acc | 88.6 | 88.6 | 76.2 | 89.2 | 90.8 |
127
+ | | Avg Len | 2124 | 1417 (-33%) | 522 | 2089 | 2010 |
128
+ | AIME24 | Acc | 43.3 | 43.3 | 16.7 | 50 | 66.7 |
129
+ | | Avg Len | 6881 | 4365 (-37%) | 970 | 7379 | 9173 |
130
+ | LCB Easy | Acc | 87.4 | 89 | 84.6 | 90.7 | 91.2 |
131
+ | | Avg Len | 3415 | 2265 (-34%) | 414 | 3255 | 2775 |
132
+ | LCB Medium | Acc | 56.8 | 56.3 | 40.8 | 56.3 | 76.7 |
133
+ | | Avg Len | 8263 | 4389 (-47%) | 535 | 6742 | 6324 |
134
+ | LCB Hard | Acc | 17.9 | 17.9 | 9.8 | 17.1 | 38.2 |
135
+ | | Avg Len | 14564 | 6199 (-57%) | 618 | 10450 | 10448 |
136
+ | MMLU | Acc | 82.4 | 81.7 | 80.1 | 85.2 | 82.1 |
137
+ | | Avg Len | 1087 | 799 (-17%) | 312 | 1041 | 774 |
138
+ | GPQA Diamond | Acc | 56.8 | 56.6 | 45.5 | 52.5 | 62.6 |
139
+ | | Avg Len | 3503 | 2148 (-39%) | 600 | 3302 | 5108 |
140
+
141
+ ## Acknowledgement
142
+ We would like to thanks the compute resources from [Lambda Lab](https://lambdalabs.com/service/gpu-cloud?srsltid=AfmBOop5FnmEFTkavVtdZDsLWvHWNg6peXtat-OXJ9MW5GMNsk756PE5) and [AnyScale](https://www.anyscale.com/).
143
+
144
+ ## License
145
+ Apache-2.0
146
+
147
+ ## Citation
148
+ Please considering citing our blog post if you found it useful for your research. Thank you!
149
+
150
+ ```bibtex
151
+ @misc{reduce_overthinking_2025,
152
+ author = {NovaSky Team},
153
+ title = {Think Less, Achieve More: Cut Reasoning Costs by 50% Without Sacrificing Accuracy},
154
+ howpublished = {https://novasky-ai.github.io/posts/reduce-overthinking},
155
+ note = {Accessed: 2025-01-23},
156
+ year = {2025}
157
+ }
158
+ ","{""id"": ""NovaSky-AI/Sky-T1-32B-Flash"", ""author"": ""NovaSky-AI"", ""sha"": ""0dccf55483f1d1f0b603c505e8bfd24a0d72354c"", ""last_modified"": ""2025-02-02 05:26:20+00:00"", ""created_at"": ""2025-01-23 07:30:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 383, ""downloads_all_time"": null, ""likes"": 63, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:BAAI/TACO"", ""dataset:tasksource/PRM800K"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2.5-32B-Instruct\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- BAAI/TACO\n- tasksource/PRM800K\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_0.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_1.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_3.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_4.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_5.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_6.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_7.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='zero_to_fp32.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""Wallace1985/NovaSky-AI-Sky-T1-32B-Flash"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": 
{""BF16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-02 05:26:20+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2.5-32B-Instruct\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- BAAI/TACO\n- tasksource/PRM800K\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6791f00650b5708f20b8b118"", ""modelId"": ""NovaSky-AI/Sky-T1-32B-Flash"", ""usedStorage"": 65542293887}",1,"https://huggingface.co/Apel-sin/sky-T1-32B-flash-exl2, https://huggingface.co/secmlr/VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5",2,,0,"https://huggingface.co/mradermacher/Sky-T1-32B-Flash-i1-GGUF, https://huggingface.co/DevQuasar/NovaSky-AI.Sky-T1-32B-Flash-GGUF, https://huggingface.co/mradermacher/Sky-T1-32B-Flash-GGUF, https://huggingface.co/bartowski/Sky-T1-32B-Flash-GGUF, https://huggingface.co/maxrubin629/Sky-T1-32B-Flash-Q4-mlx, https://huggingface.co/Scotto2025/Sky-T1-32B-Flash-Q4-mlx, https://huggingface.co/tensorblock/Sky-T1-32B-Flash-GGUF",7,"https://huggingface.co/nitky/RoguePlanet-DeepSeek-R1-Qwen-32B, https://huggingface.co/DataPilot/SKYCAVE-R1-32B-v0.1, https://huggingface.co/nitky/FuseO1-DeepSeekR1-QwQ-SkyT1-Flash-Japanese-32B-Preview, https://huggingface.co/DataSoul/QSQ-sce-32B-merge, https://huggingface.co/sm54/FuseO1-QwQ-SkyT1-Flash-32B, https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1, https://huggingface.co/sm54/QwQ-DeepSeek-R1-SkyT1-Flash-Lightest-32B, https://huggingface.co/xi0v/FuseO1-flash-reproduced, https://huggingface.co/DataPilot/SKYDRIVE-32B-v0.1, https://huggingface.co/valoomba/rombo-qwq-r1-flash-32B-preview, https://huggingface.co/DataSoul/QwS-STV1-32B-merge, https://huggingface.co/tomasmcm/sky-t1-coder-32b-flash, https://huggingface.co/sm54/FuseO1-DeepSeekR1-QwQ-SkyT1-Flash-32B, 
https://huggingface.co/DataSoul/Sky-T1-32B-Flash-abliterated-merge1.618, https://huggingface.co/sm54/QwQ-DeepSeek-R1-SkyT1-Flash-Light-32B, https://huggingface.co/sm54/QwQ-DeepSeek-R1-SkyT1-Flash-Lighter-32B, https://huggingface.co/radna/Fuse-QwQ-SkyT1-Flash-32B-Preview, https://huggingface.co/YOYO-AI/QwQ-Sky-T1-Med-32B, https://huggingface.co/YOYO-AI/YOYO-O1-32B-V4-preview2",19,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, Wallace1985/NovaSky-AI-Sky-T1-32B-Flash, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=NovaSky-AI/Sky-T1-32B-Flash&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNovaSky-AI%2FSky-T1-32B-Flash%5D(%2FNovaSky-AI%2FSky-T1-32B-Flash)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ruslanmv/convert_to_gguf, totolook/Quant",8
159
+ Apel-sin/sky-T1-32B-flash-exl2,"---
160
+ library_name: transformers
161
+ datasets:
162
+ - BAAI/TACO
163
+ - tasksource/PRM800K
164
+ language:
165
+ - en
166
+ base_model: NovaSky-AI/Sky-T1-32B-Flash
167
+ license: apache-2.0
168
+ ---
169
+
170
+ ## Model Details
171
+
172
+ ### Model Description
173
+
174
+ <!-- Provide a longer summary of what this model is. -->
175
+
176
+ This is a 32B reasoning model preference optimized on top of Sky-T1-32B-Preview to significantly reduce generation lengths while maintaining accuracy. The performance is on par with o1-preview model in both math and coding, while reducing generation lengths by up to 57% relative to Sky-T1-32B-Preview.
177
+ Please see our [blog post](https://novasky-ai.github.io/posts/reduce-overthinking/) for more details.
178
+
179
+ - **Developed by:** NovaSky Team from Sky Computing Lab at UC Berkeley.
180
+
181
+ ## Training Details
182
+
183
+ ### Training Data
184
+
185
+ 10K preference pairs in math and coding domains, generated by Sky-T1-32B-Preview.
186
+
187
+ ### Training Procedure
188
+ We perform Simple Policy Optimization (SimPO) with a batch size of 96, learning rate of 5e-7, gamma of 0.3, and beta of 2.0.
189
+
190
+ #### Speeds
191
+
192
+ We use Llama-Factory for training. On 8xH100, the SimPO training takes ~2.5 hours with DeepSpeed Zero-3 Offload.
193
+
194
+
195
+ ## Evaluation
196
+ | | | Sky-T1-32B-Preview | Sky-T1-32B-Flash | Qwen2.5-32B-Instruct | QwQ-32B- Base | DeepSeek-R1-Distill-Qwen-32B |
197
+ |--------------|---------|:------------------:|:----------------:|:--------------------:|:-------------:|:----------------------------:|
198
+ | Math500 | Acc | 88.6 | 88.6 | 76.2 | 89.2 | 90.8 |
199
+ | | Avg Len | 2124 | 1417 (-33%) | 522 | 2089 | 2010 |
200
+ | AIME24 | Acc | 43.3 | 43.3 | 16.7 | 50 | 66.7 |
201
+ | | Avg Len | 6881 | 4365 (-37%) | 970 | 7379 | 9173 |
202
+ | LCB Easy | Acc | 87.4 | 89 | 84.6 | 90.7 | 91.2 |
203
+ | | Avg Len | 3415 | 2265 (-34%) | 414 | 3255 | 2775 |
204
+ | LCB Medium | Acc | 56.8 | 56.3 | 40.8 | 56.3 | 76.7 |
205
+ | | Avg Len | 8263 | 4389 (-47%) | 535 | 6742 | 6324 |
206
+ | LCB Hard | Acc | 17.9 | 17.9 | 9.8 | 17.1 | 38.2 |
207
+ | | Avg Len | 14564 | 6199 (-57%) | 618 | 10450 | 10448 |
208
+ | MMLU | Acc | 82.4 | 81.7 | 80.1 | 85.2 | 82.1 |
209
+ | | Avg Len | 1087 | 799 (-17%) | 312 | 1041 | 774 |
210
+ | GPQA Diamond | Acc | 56.8 | 56.6 | 45.5 | 52.5 | 62.6 |
211
+ | | Avg Len | 3503 | 2148 (-39%) | 600 | 3302 | 5108 |
212
+
213
+ ## Acknowledgement
214
+ We would like to thank [Lambda Lab](https://lambdalabs.com/service/gpu-cloud?srsltid=AfmBOop5FnmEFTkavVtdZDsLWvHWNg6peXtat-OXJ9MW5GMNsk756PE5) and [AnyScale](https://www.anyscale.com/) for the compute resources.
215
+
216
+ ## Citation
217
+ Please consider citing our blog post if you found it useful for your research. Thank you!
218
+
219
+ ```bibtex
220
+ @misc{reduce_overthinking_2025,
221
+ author = {NovaSky Team},
222
+ title = {Think Less, Achieve More: Cut Reasoning Costs by 50% Without Sacrificing Accuracy},
223
+ howpublished = {https://novasky-ai.github.io/posts/reduce-overthinking},
224
+ note = {Accessed: 2025-01-23},
225
+ year = {2025}
226
+ }
227
+ ","{""id"": ""Apel-sin/sky-T1-32B-flash-exl2"", ""author"": ""Apel-sin"", ""sha"": ""c03bfbd204c497a5bcd7e325df10b7070162bbe2"", ""last_modified"": ""2025-01-26 04:55:11+00:00"", ""created_at"": ""2025-01-26 04:54:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""en"", ""dataset:BAAI/TACO"", ""dataset:tasksource/PRM800K"", ""base_model:NovaSky-AI/Sky-T1-32B-Flash"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Flash"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NovaSky-AI/Sky-T1-32B-Flash\ndatasets:\n- BAAI/TACO\n- tasksource/PRM800K\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='measurement.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-26 04:55:11+00:00"", ""cardData"": ""base_model: NovaSky-AI/Sky-T1-32B-Flash\ndatasets:\n- BAAI/TACO\n- tasksource/PRM800K\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6795bfe8eefaf1c583106d06"", ""modelId"": ""Apel-sin/sky-T1-32B-flash-exl2"", ""usedStorage"": 
29820416302}",2,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Apel-sin/sky-T1-32B-flash-exl2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BApel-sin%2Fsky-T1-32B-flash-exl2%5D(%2FApel-sin%2Fsky-T1-32B-flash-exl2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
228
+ secmlr/VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5,"---
229
+ library_name: transformers
230
+ license: apache-2.0
231
+ base_model: NovaSky-AI/Sky-T1-32B-Flash
232
+ tags:
233
+ - llama-factory
234
+ - full
235
+ - generated_from_trainer
236
+ model-index:
237
+ - name: VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5
238
+ results: []
239
+ ---
240
+
241
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
242
+ should probably proofread and complete it, then remove this comment. -->
243
+
244
+ # VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5
245
+
246
+ This model is a fine-tuned version of [NovaSky-AI/Sky-T1-32B-Flash](https://huggingface.co/NovaSky-AI/Sky-T1-32B-Flash) on the VD-QWQ-Clean-8k dataset.
247
+
248
+ ## Model description
249
+
250
+ More information needed
251
+
252
+ ## Intended uses & limitations
253
+
254
+ More information needed
255
+
256
+ ## Training and evaluation data
257
+
258
+ More information needed
259
+
260
+ ## Training procedure
261
+
262
+ ### Training hyperparameters
263
+
264
+ The following hyperparameters were used during training:
265
+ - learning_rate: 1e-05
266
+ - train_batch_size: 1
267
+ - eval_batch_size: 8
268
+ - seed: 42
269
+ - distributed_type: multi-GPU
270
+ - num_devices: 4
271
+ - gradient_accumulation_steps: 12
272
+ - total_train_batch_size: 48
273
+ - total_eval_batch_size: 32
274
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
275
+ - lr_scheduler_type: cosine
276
+ - lr_scheduler_warmup_ratio: 0.1
277
+ - num_epochs: 3.0
278
+
279
+ ### Training results
280
+
281
+
282
+
283
+ ### Framework versions
284
+
285
+ - Transformers 4.48.3
286
+ - Pytorch 2.5.1+cu124
287
+ - Datasets 3.1.0
288
+ - Tokenizers 0.21.0
289
+ ","{""id"": ""secmlr/VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5"", ""author"": ""secmlr"", ""sha"": ""122f8a4cbdc7e2d1f5934eb7a78486c8c87c2fd6"", ""last_modified"": ""2025-02-23 02:22:46+00:00"", ""created_at"": ""2025-02-22 19:51:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""llama-factory"", ""full"", ""generated_from_trainer"", ""conversational"", ""base_model:NovaSky-AI/Sky-T1-32B-Flash"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Flash"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NovaSky-AI/Sky-T1-32B-Flash\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5"", ""results"": []}], ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_log.jsonl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_loss.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-23 02:22:46+00:00"", ""cardData"": ""base_model: NovaSky-AI/Sky-T1-32B-Flash\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67ba2ab7e591c76c9f6800b5"", ""modelId"": ""secmlr/VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5"", ""usedStorage"": 
131067113258}",2,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=secmlr/VD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsecmlr%2FVD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5%5D(%2Fsecmlr%2FVD-QWQ-Clean-8k_sky_32B_flash_full_sft_1e-5)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
290
+ mlx-community/Sky-T1-32B-Preview,"---
291
+ library_name: transformers
292
+ datasets:
293
+ - codeparrot/apps
294
+ - BAAI/TACO
295
+ - AI-MO/NuminaMath-CoT
296
+ language:
297
+ - en
298
+ base_model: NovaSky-AI/Sky-T1-32B-Preview
299
+ tags:
300
+ - mlx
301
+ ---
302
+
303
+ # mlx-community/Sky-T1-32B-Preview
304
+
305
+ The Model [mlx-community/Sky-T1-32B-Preview](https://huggingface.co/mlx-community/Sky-T1-32B-Preview) was
306
+ converted to MLX format from [NovaSky-AI/Sky-T1-32B-Preview](https://huggingface.co/NovaSky-AI/Sky-T1-32B-Preview)
307
+ using mlx-lm version **0.21.0** by [Focused](https://focused.io).
308
+
309
+ [![Focused Logo](https://focused.io/images/header-logo.svg ""Focused Logo"")](https://focused.io)
310
+
311
+ ## Use with mlx
312
+
313
+ ```bash
314
+ pip install mlx-lm
315
+ ```
316
+
317
+ ```python
318
+ from mlx_lm import load, generate
319
+
320
+ model, tokenizer = load(""mlx-community/Sky-T1-32B-Preview"")
321
+
322
+ prompt = ""hello""
323
+
324
+ if tokenizer.chat_template is not None:
325
+ messages = [{""role"": ""user"", ""content"": prompt}]
326
+ prompt = tokenizer.apply_chat_template(
327
+ messages, add_generation_prompt=True
328
+ )
329
+
330
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
331
+ ```
332
+
333
+ ---
334
+
335
+ Focused is a technology company at the forefront of AI-driven development, empowering organizations to unlock the full potential of artificial intelligence. From integrating innovative models into existing systems to building scalable, modern AI infrastructures, we specialize in delivering tailored, incremental solutions that meet you where you are.
336
+ Curious how we can help with your next AI project?
337
+ [Get in Touch](https://focused.io/capabilities/ai-readiness-implementation)
338
+
339
+ [![Focused Logo](https://focused.io/images/header-logo.svg ""Focused Logo"")](https://focused.io)
340
+ ","{""id"": ""mlx-community/Sky-T1-32B-Preview"", ""author"": ""mlx-community"", ""sha"": ""b203f839fab9bd66649d5f0531f184078eb01280"", ""last_modified"": ""2025-01-13 21:18:15+00:00"", ""created_at"": ""2025-01-13 02:51:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""mlx"", ""conversational"", ""en"", ""dataset:codeparrot/apps"", ""dataset:BAAI/TACO"", ""dataset:AI-MO/NuminaMath-CoT"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- codeparrot/apps\n- BAAI/TACO\n- AI-MO/NuminaMath-CoT\nlanguage:\n- en\nlibrary_name: transformers\ntags:\n- mlx"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00013-of-00013.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-13 21:18:15+00:00"", ""cardData"": ""base_model: NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- codeparrot/apps\n- BAAI/TACO\n- AI-MO/NuminaMath-CoT\nlanguage:\n- en\nlibrary_name: transformers\ntags:\n- mlx"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67847fc217fa5ebc50e71f72"", ""modelId"": ""mlx-community/Sky-T1-32B-Preview"", ""usedStorage"": 65539262631}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mlx-community/Sky-T1-32B-Preview&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlx-community%2FSky-T1-32B-Preview%5D(%2Fmlx-community%2FSky-T1-32B-Preview)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
341
+ prithivMLmods/Omni-Reasoner-o1,"---
342
+ license: apache-2.0
343
+ language:
344
+ - en
345
+ base_model:
346
+ - NovaSky-AI/Sky-T1-32B-Preview
347
+ pipeline_tag: text-generation
348
+ library_name: transformers
349
+ tags:
350
+ - text-generation-inference
351
+ - Omni
352
+ ---
353
+ ![omni.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Zz_Uc6M06tyh3Euhm93fn.png)
354
+
355
+ # **Omni-Reasoner-o1: Overview**
356
+
357
+ *Omni-Reasoner-o1* is a specialized AI model built upon the Sky T1 32B architecture, combined with **Qwen 2.5 32B**, and fine-tuned using synthetic data from OpenAI pipeline-generated records. It is optimized for mathematical reasoning and complex problem-solving.
358
+
359
+ # **Quickstart with Transformers**
360
+
361
+ Here provides a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and how to generate contents.
362
+
363
+ ```python
364
+ from transformers import AutoModelForCausalLM, AutoTokenizer
365
+
366
+ model_name = ""prithivMLmods/Omni-Reasoner-o1""
367
+
368
+ model = AutoModelForCausalLM.from_pretrained(
369
+ model_name,
370
+ torch_dtype=""auto"",
371
+ device_map=""auto""
372
+ )
373
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
374
+
375
+ prompt = ""How many r in strawberry.""
376
+ messages = [
377
+ {""role"": ""system"", ""content"": ""You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.""},
378
+ {""role"": ""user"", ""content"": prompt}
379
+ ]
380
+ text = tokenizer.apply_chat_template(
381
+ messages,
382
+ tokenize=False,
383
+ add_generation_prompt=True
384
+ )
385
+ model_inputs = tokenizer([text], return_tensors=""pt"").to(model.device)
386
+
387
+ generated_ids = model.generate(
388
+ **model_inputs,
389
+ max_new_tokens=512
390
+ )
391
+ generated_ids = [
392
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
393
+ ]
394
+
395
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
396
+ ```
397
+
398
+ # **Key Features**
399
+ 1. **Hybrid Architecture:**
400
+ - Combines **Sky T1 32B** and **Qwen 2.5 32B** to leverage strengths in both natural language understanding and mathematical reasoning.
401
+ - Enables robust problem-solving across diverse domains.
402
+
403
+ 2. **Mathematical Expertise:**
404
+ - Trained specifically as a **mathematical reasoner and problem solver**.
405
+ - Excels in numerical computations, symbolic mathematics, proofs, and equation-solving.
406
+
407
+ 3. **Synthetic Data Fine-Tuning:**
408
+ - Leveraged high-quality synthetic data generated by OpenAI pipelines.
409
+ - Ensures enhanced generalization across a wide range of problem-solving scenarios.
410
+
411
+ 4. **Natural Language Processing (NLP):**
412
+ - Capable of understanding and interpreting complex language inputs related to mathematical queries.
413
+ - Provides step-by-step explanations for solutions, fostering user understanding.
414
+
415
+ 5. **Multi-Task Capability:**
416
+ - Handles a variety of mathematical tasks including algebra, calculus, combinatorics, and statistics.
417
+ - Suitable for word problems and domain-specific queries requiring logic and reasoning.
418
+
419
+ 6. **Scalability:**
420
+ - Designed for seamless integration into **educational platforms**, **scientific research tools**, and **automated reasoning systems**.
421
+
422
+ # **Intended Use**
423
+ 1. **Educational Applications:**
424
+ - Acts as a tutor for students in mathematics and related fields.
425
+ - Provides explanations, step-by-step solutions, and practice problem generation.
426
+
427
+ 2. **Scientific Research:**
428
+ - Aids researchers in automating repetitive mathematical calculations or exploring new problem-solving methodologies.
429
+
430
+ 3. **Professional Use Cases:**
431
+ - Supports professionals in domains like engineering, data science, and finance by solving domain-specific mathematical problems.
432
+
433
+ 4. **AI-Assisted Development:**
434
+ - Assists in coding environments for algorithm development and debugging by identifying mathematical bottlenecks or issues.
435
+
436
+ 5. **Automated Systems:**
437
+ - Integrates into automated reasoning and decision-making systems for operations requiring quantitative analysis.
438
+
439
+ # **Limitations**
440
+ 1. **Reliance on Synthetic Data:**
441
+ - Despite its extensive training, reliance on synthetic data might lead to **biases** or **overfitting** in specific scenarios.
442
+ - May struggle with real-world edge cases not reflected in its training data.
443
+
444
+ 2. **Domain-Specific Gaps:**
445
+ - While excelling in mathematics, it may not perform as well in non-mathematical or interdisciplinary problem-solving tasks.
446
+
447
+ 3. **Resource Intensive:**
448
+ - Due to its hybrid 32B architecture, deploying the model requires **significant computational resources**.
449
+
450
+ 4. **Interpretation Errors:**
451
+ - Misinterprets poorly structured or ambiguous natural language queries.
452
+ - May provide overly verbose explanations that aren't always user-friendly.
453
+
454
+ 5. **Limitations in Creativity:**
455
+ - Not designed for creative or abstract tasks outside mathematical reasoning, such as writing, art, or subjective decision-making.
456
+
457
+ 6. **Dependency on Prompt Quality:**
458
+ - Performance can degrade with unclear, poorly framed, or overly complex prompts","{""id"": ""prithivMLmods/Omni-Reasoner-o1"", ""author"": ""prithivMLmods"", ""sha"": ""79fbfb1112877623dbb167697805af235086e7b9"", ""last_modified"": ""2025-01-17 16:00:02+00:00"", ""created_at"": ""2025-01-16 19:48:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 3, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""Omni"", ""text-generation"", ""conversational"", ""en"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- text-generation-inference\n- Omni"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %} {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' }}\n 
{%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|vision_pad|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-17 16:00:02+00:00"", ""cardData"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- text-generation-inference\n- Omni"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""678962a9e42ea432679a3fc4"", ""modelId"": ""prithivMLmods/Omni-Reasoner-o1"", ""usedStorage"": 65539263578}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=prithivMLmods/Omni-Reasoner-o1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BprithivMLmods%2FOmni-Reasoner-o1%5D(%2FprithivMLmods%2FOmni-Reasoner-o1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
459
+ alitominerba/dsgfsdfg,"---
460
+ datasets:
461
+ - fka/awesome-chatgpt-prompts
462
+ language:
463
+ - es
464
+ base_model:
465
+ - NovaSky-AI/Sky-T1-32B-Preview
466
+ ---
467
+
468
+ # Model Card for Model ID
469
+
470
+ <!-- Provide a quick summary of what the model is/does. -->
471
+
472
+ This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
473
+
474
+ ## Model Details
475
+
476
+ ### Model Description
477
+
478
+ <!-- Provide a longer summary of what this model is. -->
479
+
480
+
481
+
482
+ - **Developed by:** [More Information Needed]
483
+ - **Funded by [optional]:** [More Information Needed]
484
+ - **Shared by [optional]:** [More Information Needed]
485
+ - **Model type:** [More Information Needed]
486
+ - **Language(s) (NLP):** [More Information Needed]
487
+ - **License:** [More Information Needed]
488
+ - **Finetuned from model [optional]:** [More Information Needed]
489
+
490
+ ### Model Sources [optional]
491
+
492
+ <!-- Provide the basic links for the model. -->
493
+
494
+ - **Repository:** [More Information Needed]
495
+ - **Paper [optional]:** [More Information Needed]
496
+ - **Demo [optional]:** [More Information Needed]
497
+
498
+ ## Uses
499
+
500
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
501
+
502
+ ### Direct Use
503
+
504
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
505
+
506
+ [More Information Needed]
507
+
508
+ ### Downstream Use [optional]
509
+
510
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
511
+
512
+ [More Information Needed]
513
+
514
+ ### Out-of-Scope Use
515
+
516
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
517
+
518
+ [More Information Needed]
519
+
520
+ ## Bias, Risks, and Limitations
521
+
522
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
523
+
524
+ [More Information Needed]
525
+
526
+ ### Recommendations
527
+
528
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
529
+
530
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
531
+
532
+ ## How to Get Started with the Model
533
+
534
+ Use the code below to get started with the model.
535
+
536
+ [More Information Needed]
537
+
538
+ ## Training Details
539
+
540
+ ### Training Data
541
+
542
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
543
+
544
+ [More Information Needed]
545
+
546
+ ### Training Procedure
547
+
548
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
549
+
550
+ #### Preprocessing [optional]
551
+
552
+ [More Information Needed]
553
+
554
+
555
+ #### Training Hyperparameters
556
+
557
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
558
+
559
+ #### Speeds, Sizes, Times [optional]
560
+
561
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
562
+
563
+ [More Information Needed]
564
+
565
+ ## Evaluation
566
+
567
+ <!-- This section describes the evaluation protocols and provides the results. -->
568
+
569
+ ### Testing Data, Factors & Metrics
570
+
571
+ #### Testing Data
572
+
573
+ <!-- This should link to a Dataset Card if possible. -->
574
+
575
+ [More Information Needed]
576
+
577
+ #### Factors
578
+
579
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
580
+
581
+ [More Information Needed]
582
+
583
+ #### Metrics
584
+
585
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
586
+
587
+ [More Information Needed]
588
+
589
+ ### Results
590
+
591
+ [More Information Needed]
592
+
593
+ #### Summary
594
+
595
+
596
+
597
+ ## Model Examination [optional]
598
+
599
+ <!-- Relevant interpretability work for the model goes here -->
600
+
601
+ [More Information Needed]
602
+
603
+ ## Environmental Impact
604
+
605
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
606
+
607
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
608
+
609
+ - **Hardware Type:** [More Information Needed]
610
+ - **Hours used:** [More Information Needed]
611
+ - **Cloud Provider:** [More Information Needed]
612
+ - **Compute Region:** [More Information Needed]
613
+ - **Carbon Emitted:** [More Information Needed]
614
+
615
+ ## Technical Specifications [optional]
616
+
617
+ ### Model Architecture and Objective
618
+
619
+ [More Information Needed]
620
+
621
+ ### Compute Infrastructure
622
+
623
+ [More Information Needed]
624
+
625
+ #### Hardware
626
+
627
+ [More Information Needed]
628
+
629
+ #### Software
630
+
631
+ [More Information Needed]
632
+
633
+ ## Citation [optional]
634
+
635
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
636
+
637
+ **BibTeX:**
638
+
639
+ [More Information Needed]
640
+
641
+ **APA:**
642
+
643
+ [More Information Needed]
644
+
645
+ ## Glossary [optional]
646
+
647
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
648
+
649
+ [More Information Needed]
650
+
651
+ ## More Information [optional]
652
+
653
+ [More Information Needed]
654
+
655
+ ## Model Card Authors [optional]
656
+
657
+ [More Information Needed]
658
+
659
+ ## Model Card Contact
660
+
661
+ [More Information Needed]","{""id"": ""alitominerba/dsgfsdfg"", ""author"": ""alitominerba"", ""sha"": ""0e708fd9875a65ca09437ffce0be4ef691a58f9d"", ""last_modified"": ""2025-01-19 04:27:55+00:00"", ""created_at"": ""2025-01-19 04:27:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""es"", ""dataset:fka/awesome-chatgpt-prompts"", ""arxiv:1910.09700"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- es"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-19 04:27:55+00:00"", ""cardData"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- es"", ""transformersInfo"": null, ""_id"": ""678c7f15148f7a067d477e8f"", ""modelId"": ""alitominerba/dsgfsdfg"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=alitominerba/dsgfsdfg&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Balitominerba%2Fdsgfsdfg%5D(%2Falitominerba%2Fdsgfsdfg)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
662
+ DenGreenGren/GrandMobile,"---
663
+ license: apache-2.0
664
+ datasets:
665
+ - fka/awesome-chatgpt-prompts
666
+ language:
667
+ - ru
668
+ metrics:
669
+ - bertscore
670
+ base_model:
671
+ - NovaSky-AI/Sky-T1-32B-Preview
672
+ new_version: deepseek-ai/DeepSeek-V3-Base
673
+ pipeline_tag: keypoint-detection
674
+ library_name: diffusers
675
+ tags:
676
+ - biology
677
+ ---","{""id"": ""DenGreenGren/GrandMobile"", ""author"": ""DenGreenGren"", ""sha"": ""6a8de8a5cc0dd333f379b89f251a93f8a4e50a3c"", ""last_modified"": ""2025-01-27 14:37:53+00:00"", ""created_at"": ""2025-01-27 14:37:02+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""biology"", ""keypoint-detection"", ""ru"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""keypoint-detection"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ru\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- bertscore\npipeline_tag: keypoint-detection\ntags:\n- biology\nnew_version: deepseek-ai/DeepSeek-V3-Base"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-27 14:37:53+00:00"", ""cardData"": ""base_model:\n- NovaSky-AI/Sky-T1-32B-Preview\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- ru\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- bertscore\npipeline_tag: keypoint-detection\ntags:\n- biology\nnew_version: deepseek-ai/DeepSeek-V3-Base"", ""transformersInfo"": null, ""_id"": ""67979a0ee60ab665346c2185"", ""modelId"": ""DenGreenGren/GrandMobile"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=DenGreenGren/GrandMobile&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDenGreenGren%2FGrandMobile%5D(%2FDenGreenGren%2FGrandMobile)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
678
+ MSHugg/STWNow,"---
679
+ license: mit
680
+ datasets:
681
+ - FreedomIntelligence/medical-o1-reasoning-SFT
682
+ - NovaSky-AI/Sky-T1_data_17k
683
+ - cfahlgren1/react-code-instructions
684
+ - bespokelabs/Bespoke-Stratos-17k
685
+ - fka/awesome-chatgpt-prompts
686
+ language:
687
+ - aa
688
+ - ab
689
+ - av
690
+ - ak
691
+ - ae
692
+ - ay
693
+ metrics:
694
+ - accuracy
695
+ - bertscore
696
+ - bleu
697
+ - bleurt
698
+ - brier_score
699
+ - cer
700
+ - character
701
+ - charcut_mt
702
+ - chrf
703
+ - code_eval
704
+ base_model:
705
+ - deepseek-ai/DeepSeek-R1
706
+ - deepseek-ai/DeepSeek-V3
707
+ - deepseek-ai/Janus-Pro-7B
708
+ - NovaSky-AI/Sky-T1-32B-Preview
709
+ - microsoft/phi-4
710
+ - openbmb/MiniCPM-o-2_6
711
+ - openbmb/MiniCPM-o-2_6-int4
712
+ new_version: deepseek-ai/DeepSeek-R1
713
+ library_name: flair
714
+ ---","{""id"": ""MSHugg/STWNow"", ""author"": ""MSHugg"", ""sha"": ""2fc8d46a3aa9e69ec818f885cfa6bfa8ae1289d2"", ""last_modified"": ""2025-01-27 21:10:46+00:00"", ""created_at"": ""2025-01-27 21:05:59+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""flair"", ""aa"", ""ab"", ""av"", ""ak"", ""ae"", ""ay"", ""dataset:FreedomIntelligence/medical-o1-reasoning-SFT"", ""dataset:NovaSky-AI/Sky-T1_data_17k"", ""dataset:cfahlgren1/react-code-instructions"", ""dataset:bespokelabs/Bespoke-Stratos-17k"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:NovaSky-AI/Sky-T1-32B-Preview"", ""base_model:finetune:NovaSky-AI/Sky-T1-32B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/DeepSeek-R1\n- deepseek-ai/DeepSeek-V3\n- deepseek-ai/Janus-Pro-7B\n- NovaSky-AI/Sky-T1-32B-Preview\n- microsoft/phi-4\n- openbmb/MiniCPM-o-2_6\n- openbmb/MiniCPM-o-2_6-int4\ndatasets:\n- FreedomIntelligence/medical-o1-reasoning-SFT\n- NovaSky-AI/Sky-T1_data_17k\n- cfahlgren1/react-code-instructions\n- bespokelabs/Bespoke-Stratos-17k\n- fka/awesome-chatgpt-prompts\nlanguage:\n- aa\n- ab\n- av\n- ak\n- ae\n- ay\nlibrary_name: flair\nlicense: mit\nmetrics:\n- accuracy\n- bertscore\n- bleu\n- bleurt\n- brier_score\n- cer\n- character\n- charcut_mt\n- chrf\n- code_eval\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-27 
21:10:46+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/DeepSeek-R1\n- deepseek-ai/DeepSeek-V3\n- deepseek-ai/Janus-Pro-7B\n- NovaSky-AI/Sky-T1-32B-Preview\n- microsoft/phi-4\n- openbmb/MiniCPM-o-2_6\n- openbmb/MiniCPM-o-2_6-int4\ndatasets:\n- FreedomIntelligence/medical-o1-reasoning-SFT\n- NovaSky-AI/Sky-T1_data_17k\n- cfahlgren1/react-code-instructions\n- bespokelabs/Bespoke-Stratos-17k\n- fka/awesome-chatgpt-prompts\nlanguage:\n- aa\n- ab\n- av\n- ak\n- ae\n- ay\nlibrary_name: flair\nlicense: mit\nmetrics:\n- accuracy\n- bertscore\n- bleu\n- bleurt\n- brier_score\n- cer\n- character\n- charcut_mt\n- chrf\n- code_eval\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""6797f53711ed93b78a202f5a"", ""modelId"": ""MSHugg/STWNow"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MSHugg/STWNow&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMSHugg%2FSTWNow%5D(%2FMSHugg%2FSTWNow)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Tifa-Deepsex-14b-CoT-GGUF-Q4_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4,"---
3
+ base_model:
4
+ - deepseek-ai/deepseek-r1-14b
5
+ language:
6
+ - zh
7
+ - en
8
+ library_name: transformers
9
+ tags:
10
+ - incremental-pretraining
11
+ - sft
12
+ - reinforcement-learning
13
+ - roleplay
14
+ - cot
15
+ - sex
16
+ license: apache-2.0
17
+ ---
18
+ # Tifa-Deepseek-14b-CoT
19
+
20
+ - **HF Model**: [ValueFX9507/Tifa-Deepsex-14b-CoT](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT)
21
+ - **GGUF**: [F16](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT) | [Q8](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT-Q8)(Q4损失较大,建议Q8)
22
+ - **Demo APK**: [点击下载](http://app.visionsic.com/download/projectchat.apk)
23
+ - **简单的前端**:[Github链接](https://github.com/Value99/Tifa-Deepsex-OllamaWebUI)
24
+
25
+ 本模型基于Deepseek-R1-14B进行深度优化,借助Tifa_220B生成的数据集通过三重训练策略显著增强角色扮演、小说文本生成与思维链(CoT)能力。特别适合需要长程上下文关联的创作场景。
26
+
27
+ ## 鸣谢
28
+ - **上海左北科技提供算法与算力**[企业网址](https://leftnorth.com/)
29
+ - **Deepseek团队共享GRPO算法**
30
+ - **Qwen团队提供优秀开源底座**
31
+ - **母校上海复旦大学**
32
+ - **PRIME团队提供优化思路**
33
+
34
+ ## 版本介绍:
35
+ - **Tifa-Deepsex-14b-CoT**
36
+
37
+ - 验证模型,测试RL奖励算法对于角色扮演数据的影响,该版本为初版,输出灵活但是不受控制,仅做研究使用。
38
+
39
+ - **Tifa-Deepsex-14b-CoT-Chat**
40
+
41
+ - 采用标准数据训练,使用成熟RL策略,附加防重复强化学习,适合正常使用,输出文本质量正常,少数情况下思维发散。
42
+
43
+ -增量训练0.4T小说内容
44
+
45
+ -100K由TifaMax生成的SFT数据,10K由DeepseekR1生成的SFT数据,2K高质量人工数据
46
+
47
+ -30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
48
+
49
+ - **Tifa-Deepsex-14b-CoT-Crazy**
50
+
51
+ - 大量使用RL策略,主要采用671B满血R1蒸馏的数据,输出发散性高,继承R1优点,也继承了R1的危害性。文学性能佳。
52
+
53
+ -增量训练0.4T小说内容
54
+
55
+ -40K由TifaMax生成的SFT数据,60K由DeepseekR1生成的SFT数据,2K高质量人工数据
56
+
57
+ -30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
58
+
59
+ -10K由TifaMax生成PPO数据,10K由DeepseekR1生成PPO数据
60
+
61
+ 💭**输出实例**
62
+ - ⚙️System Promot
63
+ ```Text
64
+ 你是一个史莱姆,是一个女性角色,你可以变成任何形状和物体.
65
+ 在这个世界里全部都是雌性生物,直到有一天我从海滩上醒来...
66
+
67
+ 我是这里唯一的男性,大家都对我非常好奇,在这个世界的设定里我作为旅行者
68
+ 在这个世界里第一个遇见的人就是史莱姆,史莱姆对我的身体同样有很大的欲望...
69
+
70
+ 我们在旅行中也会遇到其他的生物,史莱姆不光会教给其他生物如何获取欢愉也会一起参与进来。
71
+
72
+ 当我说开始角色扮演的时候就是我从海滩上醒来,并被史莱姆发现的时候。他正在探索我的身体。
73
+
74
+ 史莱姆描述:一个透明的蓝色生物,除了质感与人类无异。但是可以自由变形。
75
+ ```
76
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/650762d0eac45ee2e420a38b/BKxz6KfbwTioBOkha_UXl.png)
77
+
78
+ ## 0208更新消息:
79
+ 感谢大家的关注与反馈,鉴于反馈中提到的问题,我们已开发并验证完成PRIME与PPO结合的RL算法,并通过加权方式解决两种算法训练中奖励信号不稳定的问题,通过此项技术我们有望将更小的模型提升到更高的性能。我们将会针对之前收集到的问题进行修正训练,另外为了让更多人使用到模型,我们这次使用更小更快的Deepseek-7b,并参考OpenAI的长思考策略,计划推出Tifa-DeepsexV2-COT-High供大家使用。新的模型计划于阳历情人节之前送给大家作为情人节礼物。♥
80
+
81
+ ## 新模型信息整理:
82
+ - **创新PRIME联合PPO算法**
83
+ - **解决目前已知问题**
84
+ - **参考OpenAI模式奖励长思考输出**
85
+ - **减少671B数据,防止输出发散**
86
+ - **特别鸣谢https://github.com/PRIME-RL/PRIME**
87
+
88
+ ## 示例(因COT模型特点,上下文不连贯时可以使用Demo软件中的故事模式)
89
+ ![2.jpg](https://cdn-uploads.huggingface.co/production/uploads/650762d0eac45ee2e420a38b/-80ha-J8PpwSaiyHgr1k2.jpeg)
90
+
91
+ ## 目标
92
+ 针对原版Deepseek-R1-14B在长文本生成连贯性不足和角色扮演能力薄弱的核心缺陷(主要由于训练数据中小说类语料占比过低),本模型通过多阶段优化提升其角色扮演能力。
93
+
94
+ ## 注意
95
+ ⚠ **需要严格遵循官方示例模板**:
96
+ **返回的上下文需要去除思考标签与内容。否则将无法正确回复!**
97
+ 目前前端���持率非常低,建议手动修改前端代码。代码参考如下:
98
+ ```
99
+ msg.role === 'assistant' ? {
100
+ ...msg,
101
+ content: msg.content.replace(/<think>[\s\S]*?<\/think>/gi, '')
102
+ }
103
+ ```
104
+ **官方模板参考**
105
+ ```
106
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}
107
+ ```
108
+ **官方说明**
109
+
110
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/650762d0eac45ee2e420a38b/0CwMdbDffZQJz_-WZrhwH.png)
111
+
112
+ [直达超链接](https://api-docs.deepseek.com/zh-cn/guides/reasoning_model)
113
+
114
+ ## 实现
115
+ 🔥 **经过训练后**:
116
+ 1. **显著提高上下文关联**:减少答非所问情况。
117
+ 2. **消除中英混杂**:原始模型蒸馏数据大多数英文为主,经过微调后基本消除中英混杂现象。
118
+ 3. **特定词汇增加**:进行“具有深度”的角色扮演对话时,显著增加了相关词汇量,解决原始权重预训练数据不足问题。
119
+ 4. **更少拒绝**:减少了拒绝现象,但因为是企业训练,安全性还是稍作保留。
120
+ 5. **更像满血**:使用671B全量模型数据康复训练,文笔提升不死板。
121
+
122
+ ## 模型亮点
123
+ 🔥 **四阶段进化架构**:
124
+ 1. **增量预训练**:注入0.4T Token 小说,使用16k上下文训练,增强文本连贯性
125
+ 2. **Tifa-SFT**:融合全球Top4角色扮演模型Tifa的10万条高质量数据
126
+ 3. **CoT恢复训练**:采用Deepseek-32B/671B数据重建推理能力
127
+ 4. **RL强化**:保留发散性思维标签的同时优化生成质量
128
+
129
+ 💡 **工程创新**:
130
+ - 16k超长上下文训练
131
+ - 随机截断训练增强鲁棒性
132
+ - 8×H20 GPU全量微调
133
+
134
+ 💡 **启示与后续**:
135
+ - 我们在测试中发现,满血R1在角色扮演中输出内容比较发散,随机,导致此模型有相同倾向,对于角色扮演的影响还在研究中
136
+ - 输入内容相近的话语会导致向量重叠,然后重复输出,如“继续”,“还有”等无明显指向性话语
137
+ - 思维内容与正文关联性学习了满血R1的特点,发散比较严重,可能会有割裂感
138
+ - 针对以上问题,我们正在编写新的RL算法,初步计划剔除部分满血R1的内容,同时通过强化学习解决重复
139
+ - 总结:请期待V2版本,很快会与大家见面!
140
+
141
+ ## 模型详情
142
+ | 属性 | 规格 |
143
+ |-------|------|
144
+ | 基础架构 | Deepseek-R1-14B |
145
+ | 最大上下文 | 128k |
146
+ | 训练数据 | 0.4T小说 + 10万条SFT + Deepseek混合数据 |
147
+ | 训练设备 | 8×H20 GPU集群 |
148
+ | 量化支持 | GGUF(全系列量化计划中) |
149
+
150
+ ## 使用场景
151
+ ✅ **推荐场景**:
152
+ - 角色扮演对话
153
+ - 需要发散性思维的创意写作
154
+ - 复杂逻辑的思维链(CoT)推理
155
+ - 基于上下文的深度角色交互
156
+
157
+ ❌ **局限场景**:
158
+ - 数学计算与代码生成
159
+ - 短文本即时问答
160
+ - 需要严格事实性的场景
161
+
162
+ ## 注意事项
163
+ ⚠️ 本模型使用数据包含小说版权内容及Tifa模型衍生数据,请���守:
164
+ 1. 遵守apache-2.0
165
+ 2. 角色扮演数据需遵循[Tifa使用协议](https://leftnorth.com/terms.html)
166
+ 3. 生成内容需符合当地法律法规
167
+
168
+
169
+ ## 💡 使用建议
170
+ **最佳实践**:
171
+ ```python
172
+ # 启用角色扮演模式
173
+ prompt = """"""<system>进入Tifa角色引擎...</system>
174
+ <user>你现在是流浪武士楚夜,正站在长安城屋顶上</user>
175
+ <think>
176
+ 需要体现人物孤傲的气质
177
+ 加入武侠特有的环境描写
178
+ 保持对话的冷峻风格
179
+ </think>
180
+ <楚夜>""""""
181
+ ```
182
+
183
+ **参数推荐**:
184
+ ```python
185
+ generation_config = {
186
+ ""temperature"": 0.4,
187
+ ""top_p"": 0.6,
188
+ ""repetition_penalty"": 1.17,
189
+ ""max_new_tokens"": 1536,
190
+ ""do_sample"": True
191
+ }
192
+ ```
193
+
194
+ ## 致谢
195
+ - Deepseek系列模型提供的强大基座
196
+ - Tifa角色扮演模型的创新架构
197
+ - HuggingFace社区的量化工具支持
198
+
199
+ ---
200
+ license: apache-2.0
201
+ ---","{""id"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""author"": ""ValueFX9507"", ""sha"": ""711d9c4ed7ebe2222102ff66de2d971d9721e420"", ""last_modified"": ""2025-02-13 23:53:13+00:00"", ""created_at"": ""2025-02-04 07:20:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11734, ""downloads_all_time"": null, ""likes"": 781, ""library_name"": ""transformers"", ""gguf"": {""total"": 14770033664, ""architecture"": ""qwen2"", ""context_length"": 131072, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool 
%}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""zh"", ""en"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us"", ""conversational"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Demo\u6f14\u793a\u7a0b\u5e8f\uff08\u9700\u8981\u624b\u52a8\u5bfc\u5165\u89d2\u8272\u5361\u9009\u62e9\u81ea\u5b9a\u4e49API\uff09.apk', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Chat-IQ4_NL.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Crazy-IQ4_NL.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ollama\u5bfc\u5165\u914d\u7f6e\u53c2\u8003.mf', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 23:53:13+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67a1bfd7b5bbf7ae3abfb652"", ""modelId"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""usedStorage"": 26204244115}",0,"https://huggingface.co/immaari/Model-Test-04, https://huggingface.co/DarklinkYT/Roleplay",2,,0,,0,,0,,0
202
+ immaari/Model-Test-04,"---
203
+ base_model:
204
+ - ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4
205
+ datasets:
206
+ - simplescaling/s1K
207
+ - facebook/natural_reasoning
208
+ ---","{""id"": ""immaari/Model-Test-04"", ""author"": ""immaari"", ""sha"": ""f41bf46d67cca980ea379546e1a0ff7d4776da36"", ""last_modified"": ""2025-03-07 09:56:15+00:00"", ""created_at"": ""2025-03-06 08:09:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:simplescaling/s1K"", ""dataset:facebook/natural_reasoning"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\ndatasets:\n- simplescaling/s1K\n- facebook/natural_reasoning"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-07 09:56:15+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\ndatasets:\n- simplescaling/s1K\n- facebook/natural_reasoning"", ""transformersInfo"": null, ""_id"": ""67c9585430792f0621b4a980"", ""modelId"": ""immaari/Model-Test-04"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=immaari/Model-Test-04&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimmaari%2FModel-Test-04%5D(%2Fimmaari%2FModel-Test-04)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
209
+ DarklinkYT/Roleplay,"---
210
+ license: cc-by-nc-4.0
211
+ language:
212
+ - en
213
+ base_model:
214
+ - ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4
215
+ tags:
216
+ - Roleplay
217
+ ---","{""id"": ""DarklinkYT/Roleplay"", ""author"": ""DarklinkYT"", ""sha"": ""c7774d4cff7953cca55a853ec737909ccba73780"", ""last_modified"": ""2025-03-08 15:06:05+00:00"", ""created_at"": ""2025-03-08 14:55:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""Roleplay"", ""en"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- Roleplay"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-08 15:06:05+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- Roleplay"", ""transformersInfo"": null, ""_id"": ""67cc5a70eee0ee19a690f91b"", ""modelId"": ""DarklinkYT/Roleplay"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=DarklinkYT/Roleplay&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDarklinkYT%2FRoleplay%5D(%2FDarklinkYT%2FRoleplay)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Triplex_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ SciPhi/Triplex,"---
3
+ license: cc-by-nc-sa-4.0
4
+
5
+ ---
6
+
7
+ # Triplex: a SOTA LLM for knowledge graph construction.
8
+
9
+ Knowledge graphs, like Microsoft's Graph RAG, enhance RAG methods but are expensive to build. Triplex offers a 98% cost reduction for knowledge graph creation, outperforming GPT-4 at 1/60th the cost and enabling local graph building with SciPhi's R2R.
10
+
11
+ Triplex is a finetuned version of Phi3-3.8B for creating knowledge graphs from unstructured data developed by [SciPhi.AI](https://www.sciphi.ai). It works by extracting triplets - simple statements consisting of a subject, predicate, and object - from text or other data sources.
12
+
13
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/668d8d7a2413acbd544530d1/kcUC5FDEoziMSEcjVHQ3-.png)
14
+
15
+ ## Benchmark
16
+
17
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/668d8d7a2413acbd544530d1/xsZ2UPZE5mnTFvgAsQwtl.png)
18
+
19
+ ## Usage:
20
+
21
+
22
+ - **Blog:** [https://www.sciphi.ai/blog/triplex](https://www.sciphi.ai/blog/triplex)
23
+ - **Demo:** [kg.sciphi.ai](https://kg.sciphi.ai)
24
+ - **Cookbook:** [https://r2r-docs.sciphi.ai/cookbooks/knowledge-graph](https://r2r-docs.sciphi.ai/cookbooks/knowledge-graph)
25
+ - **Python:**
26
+
27
+ ```python
28
+ import json
29
+ from transformers import AutoModelForCausalLM, AutoTokenizer
30
+
31
+ def triplextract(model, tokenizer, text, entity_types, predicates):
32
+
33
+ input_format = """"""Perform Named Entity Recognition (NER) and extract knowledge graph triplets from the text. NER identifies named entities of given entity types, and triple extraction identifies relationships between entities using specified predicates.
34
+
35
+ **Entity Types:**
36
+ {entity_types}
37
+
38
+ **Predicates:**
39
+ {predicates}
40
+
41
+ **Text:**
42
+ {text}
43
+ """"""
44
+
45
+ message = input_format.format(
46
+ entity_types = json.dumps({""entity_types"": entity_types}),
47
+ predicates = json.dumps({""predicates"": predicates}),
48
+ text = text)
49
+
50
+ messages = [{'role': 'user', 'content': message}]
51
+ input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt = True, return_tensors=""pt"").to(""cuda"")
52
+ output = tokenizer.decode(model.generate(input_ids=input_ids, max_length=2048)[0], skip_special_tokens=True)
53
+ return output
54
+
55
+ model = AutoModelForCausalLM.from_pretrained(""sciphi/triplex"", trust_remote_code=True).to('cuda').eval()
56
+ tokenizer = AutoTokenizer.from_pretrained(""sciphi/triplex"", trust_remote_code=True)
57
+
58
+ entity_types = [ ""LOCATION"", ""POSITION"", ""DATE"", ""CITY"", ""COUNTRY"", ""NUMBER"" ]
59
+ predicates = [ ""POPULATION"", ""AREA"" ]
60
+ text = """"""
61
+ San Francisco,[24] officially the City and County of San Francisco, is a commercial, financial, and cultural center in Northern California.
62
+
63
+ With a population of 808,437 residents as of 2022, San Francisco is the fourth most populous city in the U.S. state of California behind Los Angeles, San Diego, and San Jose.
64
+ """"""
65
+
66
+ prediction = triplextract(model, tokenizer, text, entity_types, predicates)
67
+ print(prediction)
68
+
69
+
70
+ ```
71
+
72
+ ## Commercial usage
73
+ We want Triplex to be as widely accessible as possible, but we also need to keep commercial concerns in mind as we are still an early stage organization. Research and personal usage is fine, but we are placing some restrictions on commercial usage.
74
+
75
+ The weights for the models are licensed cc-by-nc-sa-4.0, but we will waive them for any organization with under $5M USD in gross revenue in the most recent 12-month period. If you want to remove the GPL license requirements (dual-license) and/or use the weights commercially over the revenue limit, please reach out to our team at founders@sciphi.ai.
76
+
77
+ ## Citation
78
+
79
+ ```
80
+ @misc{pimpalgaonkar2024triplex,
81
+ author = {Pimpalgaonkar, Shreyas and Tremelling, Nolan and Colegrove, Owen},
82
+ title = {Triplex: a SOTA LLM for knowledge graph construction},
83
+ year = {2024},
84
+ url = {https://huggingface.co/sciphi/triplex}
85
+ }
86
+ ```
87
+ ","{""id"": ""SciPhi/Triplex"", ""author"": ""SciPhi"", ""sha"": ""92fdd35485adde3a7321d95bb1a24337dd0e4e0f"", ""last_modified"": ""2024-08-09 00:08:10+00:00"", ""created_at"": ""2024-07-10 21:58:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1134, ""downloads_all_time"": null, ""likes"": 280, ""library_name"": ""transformers"", ""gguf"": {""total"": 3821079648, ""architecture"": ""phi3"", ""context_length"": 131072, ""chat_template"": ""{{ '<s>' }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\n' + system_message + '<|end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|end|>\n<|assistant|>\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|end|>' + '\n' }}{% endif %}{% endfor %}""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""gguf"", ""phi3"", ""text-generation"", ""conversational"", ""custom_code"", ""license:cc-by-nc-sa-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: cc-by-nc-sa-4.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-128k-instruct--configuration_phi3.Phi3Config"", ""AutoModel"": ""microsoft/Phi-3-mini-128k-instruct--modeling_phi3.Phi3ForCausalLM"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-128k-instruct--modeling_phi3.Phi3ForCausalLM""}, 
""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ '<s>' }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\n' + system_message + '<|end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|end|>\n<|assistant|>\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|end|>' + '\n' }}{% endif %}{% endfor %}"", ""eos_token"": ""<|end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantized_model-Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], 
""spaces"": [""Remsky/triplex-knowledge-graph-visualizer"", ""Anupam251272/interactive-knowledge-graph"", ""juanferocu/SciPhi-Triplex"", ""Matinmollapur01/SciPhi-Triplex"", ""Rumman157/SciPhi-Triplex"", ""steveplex/triplex-knowledge-graph-visualizer"", ""feras-vbrl/SigmaTriple""], ""safetensors"": {""parameters"": {""BF16"": 3821079552}, ""total"": 3821079552}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-09 00:08:10+00:00"", ""cardData"": ""license: cc-by-nc-sa-4.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""668f03fa5efe9be005dc9fa1"", ""modelId"": ""SciPhi/Triplex"", ""usedStorage"": 25123274931}",0,,0,,0,"https://huggingface.co/QuantFactory/Triplex-GGUF, https://huggingface.co/nvhf/Triplex-Q6_K-GGUF, https://huggingface.co/bartowski/Triplex-GGUF, https://huggingface.co/second-state/Triplex-GGUF, https://huggingface.co/mradermacher/Triplex-GGUF, https://huggingface.co/mradermacher/Triplex-i1-GGUF",6,,0,"Anupam251272/interactive-knowledge-graph, Matinmollapur01/SciPhi-Triplex, Remsky/triplex-knowledge-graph-visualizer, Rumman157/SciPhi-Triplex, feras-vbrl/SigmaTriple, huggingface/InferenceSupport/discussions/new?title=SciPhi/Triplex&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSciPhi%2FTriplex%5D(%2FSciPhi%2FTriplex)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, juanferocu/SciPhi-Triplex, steveplex/triplex-knowledge-graph-visualizer",8
VoiceCraft_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ pyp1/VoiceCraft,"---
3
+ license: cc-by-nc-sa-4.0
4
+ language:
5
+ - en
6
+ pipeline_tag: text-to-speech
7
+ ---
8
+
9
+ Repo: https://github.com/jasonppy/VoiceCraft
10
+ paper: https://jasonppy.github.io/assets/pdfs/VoiceCraft.pdf","{""id"": ""pyp1/VoiceCraft"", ""author"": ""pyp1"", ""sha"": ""52b0f909d652ea016966804767320f13ede39abd"", ""last_modified"": ""2024-08-21 20:18:17+00:00"", ""created_at"": ""2024-03-25 02:31:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 45, ""downloads_all_time"": null, ""likes"": 212, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-to-speech"", ""en"", ""license:cc-by-nc-sa-4.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: cc-by-nc-sa-4.0\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='330M_TTSEnhanced.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='830M_TTSEnhanced.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='encodec_4cb2048_giga.th', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='encodec_8cb1024_giga.th', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='giga330M.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='giga830M.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gigaHalfLibri330M_TTSEnhanced_max16s.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""pyp1/VoiceCraft_gradio"", 
""Approximetal/VoiceCraft_gradio"", ""Nymbo/VoiceCraft_gradio"", ""nsandiman/uarizona-msis-capstone-group5-imagecraft"", ""krishnakm143/VoiceCraft_gradio"", ""MarionHalgrain/pyp1-VoiceCraft"", ""cocktailpeanut/VoiceCraft_gradio"", ""SnakeAaron/pyp1-VoiceCraft"", ""spenc-jense/VoiceCraft-Demo"", ""grexovic/pyp1-VoiceCraft"", ""saadnaeem/pyp1-VoiceCraft""], ""safetensors"": {""parameters"": {""I64"": 8, ""F32"": 323454994}, ""total"": 323455002}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-21 20:18:17+00:00"", ""cardData"": ""language:\n- en\nlicense: cc-by-nc-sa-4.0\npipeline_tag: text-to-speech"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6600e1fee3faf4b4d94f30e8"", ""modelId"": ""pyp1/VoiceCraft"", ""usedStorage"": 19338050899}",0,,0,,0,,0,,0,"Approximetal/VoiceCraft_gradio, MarionHalgrain/pyp1-VoiceCraft, Nymbo/VoiceCraft_gradio, SnakeAaron/pyp1-VoiceCraft, cocktailpeanut/VoiceCraft_gradio, grexovic/pyp1-VoiceCraft, huggingface/InferenceSupport/discussions/new?title=pyp1/VoiceCraft&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpyp1%2FVoiceCraft%5D(%2Fpyp1%2FVoiceCraft)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, krishnakm143/VoiceCraft_gradio, nsandiman/uarizona-msis-capstone-group5-imagecraft, pyp1/VoiceCraft_gradio, saadnaeem/pyp1-VoiceCraft, spenc-jense/VoiceCraft-Demo",12
Wizard-Vicuna-13B-Uncensored-GPTQ_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ,"---
3
+ language:
4
+ - en
5
+ license: other
6
+ tags:
7
+ - uncensored
8
+ datasets:
9
+ - ehartford/wizard_vicuna_70k_unfiltered
10
+ model_name: Wizard Vicuna 13B Uncensored
11
+ base_model: ehartford/Wizard-Vicuna-13B-Uncensored
12
+ inference: false
13
+ model_creator: Eric Hartford
14
+ model_type: llama
15
+ prompt_template: 'A chat between a curious user and an artificial intelligence assistant.
16
+ The assistant gives helpful, detailed, and polite answers to the user''s questions.
17
+ USER: {prompt} ASSISTANT:
18
+
19
+ '
20
+ quantized_by: TheBloke
21
+ ---
22
+
23
+ <!-- header start -->
24
+ <!-- 200823 -->
25
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
26
+ <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
27
+ </div>
28
+ <div style=""display: flex; justify-content: space-between; width: 100%;"">
29
+ <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
30
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
31
+ </div>
32
+ <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
33
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
34
+ </div>
35
+ </div>
36
+ <div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
37
+ <hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
38
+ <!-- header end -->
39
+
40
+ # Wizard Vicuna 13B Uncensored - GPTQ
41
+ - Model creator: [Eric Hartford](https://huggingface.co/ehartford)
42
+ - Original model: [Wizard Vicuna 13B Uncensored](https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored)
43
+
44
+ <!-- description start -->
45
+ ## Description
46
+
47
+ This repo contains GPTQ model files for [Eric Hartford's Wizard Vicuna 13B Uncensored](https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored).
48
+
49
+ Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them.
50
+
51
+ <!-- description end -->
52
+ <!-- repositories-available start -->
53
+ ## Repositories available
54
+
55
+ * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-AWQ)
56
+ * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ)
57
+ * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGUF)
58
+ * [Eric Hartford's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored)
59
+ <!-- repositories-available end -->
60
+
61
+ <!-- prompt-template start -->
62
+ ## Prompt template: Vicuna
63
+
64
+ ```
65
+ A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
66
+
67
+ ```
68
+
69
+ <!-- prompt-template end -->
70
+
71
+
72
+ <!-- README_GPTQ.md-provided-files start -->
73
+ ## Provided files and GPTQ parameters
74
+
75
+ Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements.
76
+
77
+ Each separate quant is in a different branch. See below for instructions on fetching from different branches.
78
+
79
+ All recent GPTQ files are made with AutoGPTQ, and all files in non-main branches are made with AutoGPTQ. Files in the `main` branch which were uploaded before August 2023 were made with GPTQ-for-LLaMa.
80
+
81
+ <details>
82
+ <summary>Explanation of GPTQ parameters</summary>
83
+
84
+ - Bits: The bit size of the quantised model.
85
+ - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. ""None"" is the lowest possible value.
86
+ - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now.
87
+ - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy.
88
+ - GPTQ dataset: The dataset used for quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s).
89
+ - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences.
90
+ - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit.
91
+
92
+ </details>
93
+
94
+ | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc |
95
+ | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- |
96
+ | [latest](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ/tree/latest) | 4 | 128 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 8.11 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. |
97
+ | [model_v1](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ/tree/model_v1) | 4 | 128 | No | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 8.11 GB | Yes | 4-bit, without Act Order and group size 128g. |
98
+ | [main](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ/tree/main) | 4 | 128 | No | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 2048 | 8.11 GB | Yes | 4-bit, without Act Order and group size 128g. |
99
+
100
+ <!-- README_GPTQ.md-provided-files end -->
101
+
102
+ <!-- README_GPTQ.md-download-from-branches start -->
103
+ ## How to download from branches
104
+
105
+ - In text-generation-webui, you can add `:branch` to the end of the download name, eg `TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ:latest`
106
+ - With Git, you can clone a branch with:
107
+ ```
108
+ git clone --single-branch --branch latest https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ
109
+ ```
110
+ - In Python Transformers code, the branch is the `revision` parameter; see below.
111
+ <!-- README_GPTQ.md-download-from-branches end -->
112
+ <!-- README_GPTQ.md-text-generation-webui start -->
113
+ ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
114
+
115
+ Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
116
+
117
+ It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install.
118
+
119
+ 1. Click the **Model tab**.
120
+ 2. Under **Download custom model or LoRA**, enter `TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ`.
121
+ - To download from a specific branch, enter for example `TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ:latest`
122
+ - see Provided Files above for the list of branches for each option.
123
+ 3. Click **Download**.
124
+ 4. The model will start downloading. Once it's finished it will say ""Done"".
125
+ 5. In the top left, click the refresh icon next to **Model**.
126
+ 6. In the **Model** dropdown, choose the model you just downloaded: `Wizard-Vicuna-13B-Uncensored-GPTQ`
127
+ 7. The model will automatically load, and is now ready for use!
128
+ 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.
129
+ * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`.
130
+ 9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!
131
+ <!-- README_GPTQ.md-text-generation-webui end -->
132
+
133
+ <!-- README_GPTQ.md-use-from-python start -->
134
+ ## How to use this GPTQ model from Python code
135
+
136
+ ### Install the necessary packages
137
+
138
+ Requires: Transformers 4.32.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later.
139
+
140
+ ```shell
141
+ pip3 install transformers>=4.32.0 optimum>=1.12.0
142
+ pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7
143
+ ```
144
+
145
+ If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead:
146
+
147
+ ```shell
148
+ pip3 uninstall -y auto-gptq
149
+ git clone https://github.com/PanQiWei/AutoGPTQ
150
+ cd AutoGPTQ
151
+ pip3 install .
152
+ ```
153
+
154
+ ### For CodeLlama models only: you must use Transformers 4.33.0 or later.
155
+
156
+ If 4.33.0 is not yet released when you read this, you will need to install Transformers from source:
157
+ ```shell
158
+ pip3 uninstall -y transformers
159
+ pip3 install git+https://github.com/huggingface/transformers.git
160
+ ```
161
+
162
+ ### You can then use the following code
163
+
164
+ ```python
165
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
166
+
167
+ model_name_or_path = ""TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ""
168
+ # To use a different branch, change revision
169
+ # For example: revision=""latest""
170
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
171
+ device_map=""auto"",
172
+ trust_remote_code=False,
173
+ revision=""main"")
174
+
175
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
176
+
177
+ prompt = ""Tell me about AI""
178
+ prompt_template=f'''A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
179
+
180
+ '''
181
+
182
+ print(""\n\n*** Generate:"")
183
+
184
+ input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
185
+ output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
186
+ print(tokenizer.decode(output[0]))
187
+
188
+ # Inference can also be done using transformers' pipeline
189
+
190
+ print(""*** Pipeline:"")
191
+ pipe = pipeline(
192
+ ""text-generation"",
193
+ model=model,
194
+ tokenizer=tokenizer,
195
+ max_new_tokens=512,
196
+ do_sample=True,
197
+ temperature=0.7,
198
+ top_p=0.95,
199
+ top_k=40,
200
+ repetition_penalty=1.1
201
+ )
202
+
203
+ print(pipe(prompt_template)[0]['generated_text'])
204
+ ```
205
+ <!-- README_GPTQ.md-use-from-python end -->
206
+
207
+ <!-- README_GPTQ.md-compatibility start -->
208
+ ## Compatibility
209
+
210
+ The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI).
211
+
212
+ [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility.
213
+
214
+ [Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models.
215
+ <!-- README_GPTQ.md-compatibility end -->
216
+
217
+ <!-- footer start -->
218
+ <!-- 200823 -->
219
+ ## Discord
220
+
221
+ For further support, and discussions on these models and AI in general, join us at:
222
+
223
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
224
+
225
+ ## Thanks, and how to contribute
226
+
227
+ Thanks to the [chirper.ai](https://chirper.ai) team!
228
+
229
+ Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
230
+
231
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
232
+
233
+ If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
234
+
235
+ Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
236
+
237
+ * Patreon: https://patreon.com/TheBlokeAI
238
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
239
+
240
+ **Special thanks to**: Aemon Algiz.
241
+
242
+ **Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov
243
+
244
+
245
+ Thank you to all my generous patrons and donaters!
246
+
247
+ And thank you again to a16z for their generous grant.
248
+
249
+ <!-- footer end -->
250
+
251
+ # Original model card: Eric Hartford's Wizard Vicuna 13B Uncensored
252
+
253
+
254
+ This is [wizard-vicuna-13b](https://huggingface.co/junelee/wizard-vicuna-13b) trained with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.
255
+
256
+ Shout out to the open source AI/ML community, and everyone who helped me out.
257
+
258
+ Note:
259
+
260
+ An uncensored model has no guardrails.
261
+
262
+ You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car.
263
+
264
+ Publishing anything this model generates is the same as publishing it yourself.
265
+
266
+ You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.
267
+ ","{""id"": ""TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ"", ""author"": ""TheBloke"", ""sha"": ""c322bec72dabdbf1fe4a8196b0234694b316a1a7"", ""last_modified"": ""2023-09-27 12:44:18+00:00"", ""created_at"": ""2023-05-13 08:18:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3777, ""downloads_all_time"": null, ""likes"": 319, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""uncensored"", ""en"", ""dataset:ehartford/wizard_vicuna_70k_unfiltered"", ""base_model:cognitivecomputations/Wizard-Vicuna-13B-Uncensored"", ""base_model:quantized:cognitivecomputations/Wizard-Vicuna-13B-Uncensored"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""4-bit"", ""gptq"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: ehartford/Wizard-Vicuna-13B-Uncensored\ndatasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\nmodel_name: Wizard Vicuna 13B Uncensored\ntags:\n- uncensored\ninference: false\nmodel_creator: Eric Hartford\nmodel_type: llama\nprompt_template: 'A chat between a curious user and an artificial intelligence assistant.\n The assistant gives helpful, detailed, and polite answers to the user''s questions.\n USER: {prompt} ASSISTANT:\n\n '\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""quantization_config"": {""bits"": 4, ""quant_method"": ""gptq""}, ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, 
""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='huggingface-metadata.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantize_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""KBaba7/Quant"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""bhaskartripathi/LLM_Quantization"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""BAAI/open_flageval_vlm_leaderboard"", ""neubla/neubla-llm-evaluation-board"", 
""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""ruslanmv/convert_to_gguf"", ""smothiki/open_llm_leaderboard"", ""csalabs/AI-EMBD"", ""csalabs/Replicate-7b-chat-Llama-streamlit"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""dkdaniz/katara"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F32"": 429350400, ""I32"": 1598361600}, ""total"": 2027712000}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 12:44:18+00:00"", ""cardData"": ""base_model: ehartford/Wizard-Vicuna-13B-Uncensored\ndatasets:\n- ehartford/wizard_vicuna_70k_unfiltered\nlanguage:\n- en\nlicense: other\nmodel_name: Wizard Vicuna 13B Uncensored\ntags:\n- uncensored\ninference: false\nmodel_creator: Eric Hartford\nmodel_type: llama\nprompt_template: 'A chat between a curious user and an artificial intelligence assistant.\n The assistant gives helpful, detailed, and polite answers to the user''s questions.\n USER: {prompt} ASSISTANT:\n\n '\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""645f47cf6990e1208522b824"", ""modelId"": ""TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ"", ""usedStorage"": 48666429193}",0,,0,,0,,0,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, FallnAI/Quantize-HF-Models, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, K00B404/LLM_Quantization, KBaba7/Quant, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, bhaskartripathi/LLM_Quantization, gsaivinay/open_llm_leaderboard, 
huggingface/InferenceSupport/discussions/new?title=TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FWizard-Vicuna-13B-Uncensored-GPTQ%5D(%2FTheBloke%2FWizard-Vicuna-13B-Uncensored-GPTQ)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb",13
Yi-6B-200K_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
Yi-VL-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ 01-ai/Yi-VL-34B,"---
3
+ license: apache-2.0
4
+ library_name: pytorch
5
+ tags:
6
+ - llava
7
+ license_name: yi-license
8
+ license_link: LICENSE
9
+ pipeline_tag: image-text-to-text
10
+ ---
11
+
12
+ <div align=""center"">
13
+
14
+ <picture>
15
+ <source media=""(prefers-color-scheme: dark)"" srcset=""https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_dark.svg"" width=""200px"">
16
+ <source media=""(prefers-color-scheme: light)"" srcset=""https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_light.svg"" width=""200px"">
17
+ <img alt=""specify theme context for images"" src=""https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_light.svg"" width=""200px"">
18
+ </picture>
19
+
20
+ </div>
21
+
22
+ <div align=""center"">
23
+ <h1 align=""center"">Yi Vision Language Model</h1>
24
+ </div>
25
+
26
+
27
+ <div align=""center"">
28
+ <h3 align=""center"">Better Bilingual Multimodal Model</h3>
29
+ </div>
30
+
31
+ <p align=""center"">
32
+ 🤗 <a href=""https://huggingface.co/01-ai"" target=""_blank"">Hugging Face</a> • 🤖 <a href=""https://www.modelscope.cn/organization/01ai/"" target=""_blank"">ModelScope</a> • 🟣 <a href=""https://wisemodel.cn/organization/01.AI"" target=""_blank"">wisemodel</a>
33
+ </p>
34
+
35
+ <p align=""center"">
36
+ 👩‍🚀 Ask questions or discuss ideas on <a href=""https://github.com/01-ai/Yi/discussions"" target=""_blank""> GitHub </a>!
37
+ </p>
38
+
39
+ <p align=""center"">
40
+ 👋 Join us 💬 <a href=""https://github.com/01-ai/Yi/issues/43#issuecomment-1827285245"" target=""_blank""> WeChat (Chinese) </a>!
41
+ </p>
42
+
43
+ <p align=""center"">
44
+ 📚 Grow at <a href=""https://github.com/01-ai/Yi/blob/main/docs/learning_hub.md""> Yi Learning Hub </a>!
45
+ </p>
46
+
47
+ <hr>
48
+
49
+ <!-- DO NOT REMOVE ME -->
50
+
51
+ <details open>
52
+ <summary><b>📕 Table of Contents</b></summary>
53
+
54
+ - [What is Yi-VL?](#what-is-yi-vl)
55
+ - [Overview](#overview)
56
+ - [Models](#models)
57
+ - [Features](#features)
58
+ - [Architecture](#architecture)
59
+ - [Training](#training)
60
+ - [Limitations](#limitations)
61
+ - [Why Yi-VL?](#why-yi-vl)
62
+ - [Tech report](#tech-report)
63
+ - [Benchmarks](#benchmarks)
64
+ - [Showcases](#showcases)
65
+ - [How to use Yi-VL?](#how-to-use-yi-vl)
66
+ - [Quick start](#quick-start)
67
+ - [Hardware requirements](#hardware-requirements)
68
+ - [Misc.](#misc)
69
+ - [Acknowledgements and attributions](#acknowledgements-and-attributions)
70
+ - [List of used open-source projects](#list-of-used-open-source-projects)
71
+ - [License](#license)
72
+
73
+ </details>
74
+
75
+ <hr>
76
+
77
+ # What is Yi-VL?
78
+
79
+ ## Overview
80
+
81
+ - **Yi Vision Language (Yi-VL)** model is the open-source, multimodal version of the Yi **Large Language Model (LLM)** series, enabling content comprehension, recognition, and multi-round conversations about images.
82
+
83
+ - Yi-VL demonstrates exceptional performance, **ranking first** among all existing open-source models in the latest benchmarks including [MMMU](https://mmmu-benchmark.github.io/#leaderboard) in English and [CMMMU](https://cmmmu-benchmark.github.io) in Chinese (based on data available up to January 2024).
84
+
85
+ - Yi-VL-34B is the **first** open-source 34B vision language model worldwide.
86
+
87
+ ## Models
88
+
89
+ Yi-VL has released the following versions.
90
+
91
+ Model | Download
92
+ |---|---
93
+ Yi-VL-34B |• [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-VL-34B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-VL-34B/summary) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-VL-34B)
94
+ Yi-VL-6B | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-VL-6B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-VL-6B/summary) • [🟣 wisemodel](https://wisemodel.cn/models/01.AI/Yi-VL-6B)
95
+
96
+ ## Features
97
+
98
+ Yi-VL offers the following features:
99
+
100
+ - Multi-round text-image conversations: Yi-VL can take both text and images as inputs and produce text outputs. Currently, it supports multi-round visual question answering with one image.
101
+
102
+ - Bilingual text support: Yi-VL supports conversations in both English and Chinese, including text recognition in images.
103
+
104
+ - Strong image comprehension: Yi-VL is adept at analyzing visuals, making it an efficient tool for tasks like extracting, organizing, and summarizing information from images.
105
+
106
+ - Fine-grained image resolution: Yi-VL supports image understanding at a higher resolution of 448&times;448.
107
+
108
+ ## Architecture
109
+
110
+ Yi-VL adopts the [LLaVA](https://github.com/haotian-liu/LLaVA) architecture, which is composed of three primary components:
111
+
112
+ - Vision Transformer (ViT): it's initialized with [CLIP ViT-H/14 model](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K) and used for image encoding.
113
+
114
+ - Projection Module: it's designed to align image features with text feature space, consisting of a two-layer Multilayer Perceptron (MLP) with layer normalizations.
115
+
116
+ - Large Language Model (LLM): it's initialized with [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) or [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat), demonstrating exceptional proficiency in understanding and generating both English and Chinese.
117
+
118
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/EGVHSWG4kAcX01xDaoeXS.png)
119
+
120
+ ## Training
121
+
122
+ ### Training process
123
+
124
+ Yi-VL is trained to align visual information well to the semantic space of Yi LLM, which undergoes a comprehensive three-stage training process:
125
+
126
+ - Stage 1: The parameters of ViT and the projection module are trained using an image resolution of 224&times;224. The LLM weights are frozen. The training leverages an image caption dataset comprising 100 million image-text pairs from [LAION-400M](https://laion.ai/blog/laion-400-open-dataset/). The primary objective is to enhance the ViT's knowledge acquisition within our specified architecture and to achieve better alignment between the ViT and the LLM.
127
+
128
+ - Stage 2: The image resolution of ViT is scaled up to 448&times;448, and the parameters of ViT and the projection module are trained. It aims to further boost the model's capability for discerning intricate visual details. The dataset used in this stage includes about 25 million image-text pairs, such as [LAION-400M](https://laion.ai/blog/laion-400-open-dataset/), [CLLaVA](https://huggingface.co/datasets/LinkSoul/Chinese-LLaVA-Vision-Instructions), [LLaVAR](https://llavar.github.io/), [Flickr](https://www.kaggle.com/datasets/hsankesara/flickr-image-dataset), [VQAv2](https://paperswithcode.com/dataset/visual-question-answering-v2-0), [RefCOCO](https://github.com/lichengunc/refer/tree/master), [Visual7w](http://ai.stanford.edu/~yukez/visual7w/) and so on.
129
+
130
+ - Stage 3: The parameters of the entire model (that is, ViT, projection module, and LLM) are trained. The primary goal is to enhance the model's proficiency in multimodal chat interactions, thereby endowing it with the ability to seamlessly integrate and interpret visual and linguistic inputs. To this end, the training dataset encompasses a diverse range of sources, totalling approximately 1 million image-text pairs, including [GQA](https://cs.stanford.edu/people/dorarad/gqa/download.html), [VizWiz VQA](https://vizwiz.org/tasks-and-datasets/vqa/), [TextCaps](https://opendatalab.com/OpenDataLab/TextCaps), [OCR-VQA](https://ocr-vqa.github.io/), [Visual Genome](https://homes.cs.washington.edu/~ranjay/visualgenome/api.html), [LAION GPT4V](https://huggingface.co/datasets/laion/gpt4v-dataset) and so on. To ensure data balancing, we impose a cap on the maximum data contribution from any single source, restricting it to no more than 50,000 pairs.
131
+
132
+ Below are the parameters configured for each stage.
133
+
134
+ Stage | Global batch size | Learning rate | Gradient clip | Epochs
135
+ |---|---|---|---|---
136
+ Stage 1, 2 |4096|1e-4|0.5|1
137
+ Stage 3|256|2e-5|1.0|2
138
+
139
+ ### Training resource consumption
140
+
141
+ - The training consumes 128 NVIDIA A800 (80G) GPUs.
142
+
143
+ - The total training time amounted to approximately 10 days for Yi-VL-34B and 3 days for Yi-VL-6B.
144
+
145
+ ## Limitations
146
+
147
+ This is the initial release of the Yi-VL, which comes with some known limitations. It is recommended to carefully evaluate potential risks before adopting any models.
148
+
149
+ - Feature limitation
150
+
151
+ - Visual question answering is supported. Other features like text-to-3D and image-to-video are not yet supported.
152
+
153
+ - A single image rather than several images can be accepted as an input.
154
+
155
+ - Hallucination problem
156
+
157
+ - There is a certain possibility of generating content that does not exist in the image.
158
+
159
+ - In scenes containing multiple objects, some objects might be incorrectly identified or described with insufficient detail.
160
+
161
+ - Resolution issue
162
+
163
+ - Yi-VL is trained on images with a resolution of 448&times;448. During inference, inputs of any resolution are resized to 448&times;448. Low-resolution images may result in information loss, and more fine-grained images (above 448) do not bring in extra knowledge.
164
+
165
+ - Other limitations of the Yi LLM.
166
+
167
+ # Why Yi-VL?
168
+
169
+ ## Tech report
170
+
171
+ For detailed capabilities of the Yi series model, see [Yi: Open Foundation Models by 01.AI](https://arxiv.org/abs/2403.04652).
172
+
173
+ ### Citation
174
+ ```
175
+ @misc{ai2024yi,
176
+ title={Yi: Open Foundation Models by 01.AI},
177
+ author={01. AI and : and Alex Young and Bei Chen and Chao Li and Chengen Huang and Ge Zhang and Guanwei Zhang and Heng Li and Jiangcheng Zhu and Jianqun Chen and Jing Chang and Kaidong Yu and Peng Liu and Qiang Liu and Shawn Yue and Senbin Yang and Shiming Yang and Tao Yu and Wen Xie and Wenhao Huang and Xiaohui Hu and Xiaoyi Ren and Xinyao Niu and Pengcheng Nie and Yuchi Xu and Yudong Liu and Yue Wang and Yuxuan Cai and Zhenyu Gu and Zhiyuan Liu and Zonghong Dai},
178
+ year={2024},
179
+ eprint={2403.04652},
180
+ archivePrefix={arXiv},
181
+ primaryClass={cs.CL}
182
+ }
183
+ ```
184
+
185
+
186
+ ## Benchmarks
187
+
188
+ Yi-VL outperforms all existing open-source models in [MMMU](https://mmmu-benchmark.github.io) and [CMMMU](https://cmmmu-benchmark.github.io), two advanced benchmarks that include massive multi-discipline multimodal questions (based on data available up to January 2024).
189
+
190
+ - MMMU
191
+
192
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/kCmXuwLbLvequ93kjh3mg.png)
193
+
194
+ - CMMMU
195
+
196
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/6YuSakMCg3D2AozixdoZ0.png)
197
+
198
+ ## Showcases
199
+
200
+ Below are some representative examples of detailed description and visual question answering, showcasing the capabilities of Yi-VL.
201
+
202
+ - English
203
+
204
+
205
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64cc65d786d8dc0caa6ab3cd/F_2bIVwMtVamygbVqtb8E.png)
206
+
207
+ - Chinese
208
+
209
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/l_tLzugFtHk1dkVsFJE7B.png)
210
+
211
+ # How to use Yi-VL?
212
+
213
+ ## Quick start
214
+
215
+ Please refer to [Yi GitHub Repo](https://github.com/01-ai/Yi/tree/main/VL) for details.
216
+
217
+ ## Hardware requirements
218
+
219
+ For model inference, the recommended GPU examples are:
220
+
221
+ - Yi-VL-6B: RTX 3090, RTX 4090, A10, A30
222
+
223
+ - Yi-VL-34B: 4 &times; RTX 4090, A800 (80 GB)
224
+
225
+ # Misc.
226
+
227
+ ## Acknowledgements and attributions
228
+
229
+ This project makes use of open-source software/components. We acknowledge and are grateful to these developers for their contributions to the open-source community.
230
+
231
+ ### List of used open-source projects
232
+
233
+ 1. LLaVA
234
+ - Authors: Haotian Liu, Chunyuan Li, Qingyang Wu, Yuheng Li, and Yong Jae Lee
235
+ - Source: https://github.com/haotian-liu/LLaVA
236
+ - License: Apache-2.0 license
237
+ - Description: The codebase is based on LLaVA code.
238
+
239
+ 2. OpenClip
240
+ - Authors: Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt
241
+ - Source: https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K
242
+ - License: MIT
243
+ - Description: The ViT is initialized using the weights of OpenClip.
244
+
245
+ **Notes**
246
+
247
+ - This attribution does not claim to cover all open-source components used. Please check individual components and their respective licenses for full details.
248
+
249
+ - The use of the open-source components is subject to the terms and conditions of the respective licenses.
250
+
251
+ We appreciate the open-source community for their invaluable contributions to the technology world.
252
+
253
+ ## License
254
+
255
+ Please refer to the [acknowledgements and attributions](#acknowledgements-and-attributions), as well as individual components, for the license of source code.
256
+
257
+ The Yi series models are fully open for academic research and free for commercial use, permissions of which are automatically granted upon application.
258
+
259
+ All usage must adhere to the [Apache 2.0 license](https://github.com/01-ai/Yi/blob/main/LICENSE).
260
+
261
+ For free commercial use, you only need to send an email to get official commercial permission.","{""id"": ""01-ai/Yi-VL-34B"", ""author"": ""01-ai"", ""sha"": ""2bd12c3b988c443b34b3c8b0355a01548aa2e33f"", ""last_modified"": ""2024-06-26 10:44:00+00:00"", ""created_at"": ""2023-12-25 03:26:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 176, ""downloads_all_time"": null, ""likes"": 262, ""library_name"": ""pytorch"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pytorch"", ""llava"", ""image-text-to-text"", ""conversational"", ""arxiv:2403.04652"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: pytorch\nlicense: apache-2.0\nlicense_name: yi-license\nlicense_link: LICENSE\npipeline_tag: image-text-to-text\ntags:\n- llava"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlavaLlamaForCausalLM""], ""model_type"": ""llava"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00008.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/merges.txt', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""yhavinga/dutch-tokenizer-arena"", ""shancx/01-ai-Yi-VL-34B"", ""edai/01-ai-Yi-VL-34B""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-26 10:44:00+00:00"", ""cardData"": ""library_name: pytorch\nlicense: apache-2.0\nlicense_name: yi-license\nlicense_link: LICENSE\npipeline_tag: image-text-to-text\ntags:\n- llava"", ""transformersInfo"": null, ""_id"": ""6588f668d1331d552bf9d138"", ""modelId"": ""01-ai/Yi-VL-34B"", ""usedStorage"": 222773705394}",0,,0,,0,,0,,0,"edai/01-ai-Yi-VL-34B, 
huggingface/InferenceSupport/discussions/new?title=01-ai/Yi-VL-34B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5B01-ai%2FYi-VL-34B%5D(%2F01-ai%2FYi-VL-34B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, shancx/01-ai-Yi-VL-34B, yhavinga/dutch-tokenizer-arena",4
anime-pencil-diffusion_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ yehiaserag/anime-pencil-diffusion,"---
3
+ language:
4
+ - en
5
+ thumbnail: ""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v5.jpg""
6
+ tags:
7
+ - anime
8
+ - stable-diffusion
9
+ - aiart
10
+ - text-to-image
11
+ license: ""creativeml-openrail-m""
12
+ ---
13
+ # Anime-Pencil-Diffusion
14
+ A dreambooth finetune of stable diffusion 1.5 model that will output stuff in anime pencil concept drawing style.
15
+
16
+ # Usage
17
+ Follow the directions under each version.
18
+
19
+ ## Anime-Pencil-Diffusion-V5
20
+ Trained for 400,000 steps, constant learning rate of 0.0000002 on 5000 images with 0 images for regularization.
21
+
22
+ ### Examples generated by the v5 model
23
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v5.jpg""/>
24
+
25
+ ### Usage
26
+ Include `animepencilconcept style` in prompt to invoke the finetuned style.
27
+
28
+ ### Prompt comparison for V5
29
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v5-prompt-comparison.jpg""/>
30
+
31
+ ---
32
+
33
+ ## Anime-Pencil-Diffusion-V4
34
+ Trained for 160,000 steps, constant learning rate of 0.000001 on 526 images with 0 images for regularization and no text encoder training
35
+
36
+ ### Examples generated by the v4 model
37
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v4.jpg""/>
38
+
39
+ ### Usage
40
+ Add the words `anime pencil concept style` anywhere in your prompt.
41
+
42
+ ---
43
+
44
+ ## Anime-Pencil-Diffusion-V3
45
+ Trained for 12,000 steps, constant learning rate of 0.0000005 on 80 images with 1000 images of `illustration style` for regularization
46
+
47
+ ### Examples generated by the v3 model
48
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v3.jpg""/>
49
+
50
+ ### Usage
51
+ Add the words `anime pencil concept style` anywhere in your prompt.
52
+
53
+ ---
54
+
55
+ ## Anime-Pencil-Diffusion-V2
56
+ Trained for 4,000 steps, constant learning rate of 0.00000172 on 40 images with 1000 images of `illustration style` for regularization
57
+
58
+ ### Examples generated by the v2 model
59
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v2.jpg""/>
60
+
61
+ ### Usage
62
+ Add the words `anime pencil concept style` anywhere in your prompt.
63
+
64
+ ---
65
+
66
+ ## Anime-Pencil-Diffusion-V1
67
+ Trained on 2,400 steps, constant learning rate of 0.00000172 on 16 images with 1000 images of `illustration style` for regularization
68
+
69
+ ### Examples generated by the v1 model
70
+ <img src=""https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v1.jpg""/>
71
+
72
+ ### Usage
73
+ Add the words `anime pencil concept style` anywhere in your prompt.
74
+
75
+ ---
76
+
77
+ # Socials
78
+ - Use the #AnimePencilConceptStyle so I can see the cool stuff you make!
79
+ - If you enjoy the model i'd appreciate a follow on [twitter](https://twitter.com/HellYeahYea)
80
+ - If you are feeling especially generous, you can sponsor me on [paypal](https://paypal.me/YehiaSerag)
81
+ - Created by Yehia Serag
82
+ ---
83
+ *NOTE: usage of this model implies acceptance of stable diffusion's [CreativeML Open RAIL-M license](LICENSE)*
84
+
85
+ ","{""id"": ""yehiaserag/anime-pencil-diffusion"", ""author"": ""yehiaserag"", ""sha"": ""3f9b86b54e092a86012df06e172f9f152c2b7edf"", ""last_modified"": ""2023-05-05 11:49:35+00:00"", ""created_at"": ""2022-12-03 04:15:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5456, ""downloads_all_time"": null, ""likes"": 168, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""anime"", ""stable-diffusion"", ""aiart"", ""text-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- anime\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v5.jpg"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v3.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v3.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='anime-pencil-diffusion-v4.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v4.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v5.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime-pencil-diffusion-v5.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v3.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v4.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v5-prompt-comparison.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example-v5.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Nymbo/image_gen_supaqueue"", ""qianwj/yehiaserag-anime-pencil-diffusion"", ""datboichidori/yehiaserag-anime-pencil-diffusion"", ""qwertyuiop2015/yehiaserag-anime-pencil-diffusion"", ""IrinaSvetlana/splashmix"", ""K00B404/image_gen_supaqueue_game_assets""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-05 11:49:35+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- anime\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/yehiaserag/anime-pencil-deffusion/resolve/main/example-v5.jpg"", ""transformersInfo"": null, ""_id"": ""638acd5a7a6e183d2590dd11"", ""modelId"": ""yehiaserag/anime-pencil-diffusion"", ""usedStorage"": 60371245231}",0,,0,,0,,0,,0,"IrinaSvetlana/splashmix, K00B404/image_gen_supaqueue_game_assets, Nymbo/image_gen_supaqueue, datboichidori/yehiaserag-anime-pencil-diffusion, huggingface/InferenceSupport/discussions/new?title=yehiaserag/anime-pencil-diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Byehiaserag%2Fanime-pencil-diffusion%5D(%2Fyehiaserag%2Fanime-pencil-diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, qianwj/yehiaserag-anime-pencil-diffusion, qwertyuiop2015/yehiaserag-anime-pencil-diffusion",7
bloom-560m_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
The diff for this file is too large to render. See raw diff
 
btlm-3b-8k-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ cerebras/btlm-3b-8k-base,"---
3
+ language:
4
+ - en
5
+ inference: false
6
+ tags:
7
+ - pytorch
8
+ - causal-lm
9
+ - Cerebras
10
+ - BTLM
11
+ datasets:
12
+ - cerebras/SlimPajama-627B
13
+ pipeline_tag: text-generation
14
+ license: apache-2.0
15
+ ---
16
+
17
+ # BTLM-3B-8k-base
18
+
19
+ [Bittensor Language Model (BTLM-3B-8k-base)](https://www.cerebras.net/blog/btlm-3b-8k-7b-performance-in-a-3-billion-parameter-model/) is a 3 billion parameter language model with an 8k context length trained on 627B tokens of [SlimPajama](https://huggingface.co/datasets/cerebras/SlimPajama-627B). BTLM-3B-8k-base sets a new standard for 3B parameter models, outperforming models trained on hundreds of billions more tokens and achieving comparable performance to open 7B parameter models. BTLM-3B-8k-base can also be quantized to 4-bit to fit in devices with as little as 3GB of memory. The model is made available with an Apache 2.0 license for commercial use.
20
+
21
+ BTLM was trained by [Cerebras](https://www.cerebras.net/) in partnership with [Opentensor](https://opentensor.ai/) on the newly unveiled [Condor Galaxy 1 (CG-1) supercomputer](https://www.cerebras.net/blog/introducing-condor-galaxy-1-a-4-exaflop-supercomputer-for-generative-ai/), the first public deliverable of the G42-Cerebras strategic partnership.
22
+
23
+ BTLM-3B-8k was trained with a similar architecture to [CerebrasGPT](https://arxiv.org/abs/2304.03208) with the addition of [SwiGLU](https://arxiv.org/abs/2002.05202) nonlinearity, [ALiBi](https://arxiv.org/abs/2108.12409) position embeddings, and [maximal update parameterization (muP)](https://arxiv.org/abs/2203.03466). The model was trained for 1 epoch of SlimPajama-627B. 75% of training was performed with 2k sequence length. The final 25% of training was performed at 8k sequence length to enable long sequence applications
24
+
25
+ Read [our paper](https://arxiv.org/abs/2309.11568) for more details!
26
+
27
+ ## BTLM-3B-8k Highlights
28
+
29
+ BTLM-3B-8k-base:
30
+ - **Licensed for commercial use** (Apache 2.0).
31
+ - **[State of the art 3B parameter model](#performance-vs-3b-models)**.
32
+ - **Provides 7B model performance in a 3B model** via performance enhancements from [ALiBi](https://arxiv.org/abs/2108.12409), [SwiGLU](https://arxiv.org/abs/2002.05202), [maximal update parameterization (muP)](https://arxiv.org/abs/2203.03466) and the extensively deduplicated and cleaned [SlimPajama-627B dataset](https://huggingface.co/datasets/cerebras/SlimPajama-627B).
33
+ - **[Fits in devices with as little as 3GB of memory](#memory-requirements) when quantized to 4-bit**.
34
+ - **One of few 3B models that supports 8k sequence length** thanks to ALiBi.
35
+ - **Requires 71% fewer training FLOPs, has 58% smaller memory footprint** for inference than comparable 7B models.
36
+
37
+ ## Usage
38
+ *Note: Transformers does not support muP for all models, so BTLM-3B-8k-base requires a custom model class. This causes a situation where users must either (1) enable `trust_remote_code=True` when loading the model or (2) acknowledge the warning about code execution upon loading the model.*
39
+
40
+ #### With generate():
41
+ ```python
42
+ from transformers import AutoTokenizer, AutoModelForCausalLM
43
+
44
+ # Load the tokenizer and model
45
+ tokenizer = AutoTokenizer.from_pretrained(""cerebras/btlm-3b-8k-base"")
46
+ model = AutoModelForCausalLM.from_pretrained(""cerebras/btlm-3b-8k-base"", trust_remote_code=True, torch_dtype=""auto"")
47
+
48
+ # Set the prompt for generating text
49
+ prompt = ""Albert Einstein was known for ""
50
+
51
+ # Tokenize the prompt and convert to PyTorch tensors
52
+ inputs = tokenizer(prompt, return_tensors=""pt"")
53
+
54
+ # Generate text using the model
55
+ outputs = model.generate(
56
+ **inputs,
57
+ num_beams=5,
58
+ max_new_tokens=50,
59
+ early_stopping=True,
60
+ no_repeat_ngram_size=2
61
+ )
62
+
63
+ # Convert the generated token IDs back to text
64
+ generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
65
+
66
+ # Print the generated text
67
+ print(generated_text[0])
68
+ ```
69
+
70
+ #### With pipeline:
71
+ ```python
72
+ from transformers import AutoTokenizer, AutoModelForCausalLM
73
+ from transformers import pipeline
74
+
75
+ # Load the tokenizer and model
76
+ tokenizer = AutoTokenizer.from_pretrained(""cerebras/btlm-3b-8k-base"")
77
+ model = AutoModelForCausalLM.from_pretrained(""cerebras/btlm-3b-8k-base"", trust_remote_code=True, torch_dtype=""auto"")
78
+
79
+ # Set the prompt for text generation
80
+ prompt = """"""Isaac Newton was a """"""
81
+
82
+ # Create a text generation pipeline
83
+ pipe = pipeline(""text-generation"", model=model, tokenizer=tokenizer)
84
+
85
+ # Generate text using the pipeline
86
+ generated_text = pipe(
87
+ prompt,
88
+ max_length=50,
89
+ do_sample=False,
90
+ no_repeat_ngram_size=2)[0]
91
+
92
+ # Print the generated text
93
+ print(generated_text['generated_text'])
94
+ ```
95
+
96
+ ## Evaluations and Comparisons to Other Models
97
+
98
+ ### Memory Requirements
99
+ ![figure_1_image](./figure_1_memory_footprint.png)
100
+ Figure 1. Memory requirements of different model sizes and quantization schemes
101
+
102
+ ### Quality, Training Cost, Memory Footprint, Inference Speed
103
+ ![figure_2_image](./figure_2_half_the_size_twice_the_speed.png)
104
+ Figure 2: Comparisons of quality, memory footprint & inference cost between BTLM-3B-8K and 7B model families.
105
+
106
+ ### Performance vs 3B models
107
+ ![table_1_image](./table_1_downstream_performance_3b.png)
108
+ Table 1: Performance at 3B model size. Detailed down-stream tasks comparisons. MMLU task performance is reported using 5-shot, other tasks are 0-shot.
109
+
110
+ ![figure_3_image](./figure_3_performance_vs_3b_models.png)
111
+ Figure 3: Performance at 3B model size
112
+
113
+ ### Performance vs 7B models
114
+ ![table_2_image](./table_2_downstream_performance_7b.png)
115
+ Table 2: Performance at 7B model size. Detailed down-stream tasks comparisons. MMLU task performance is reported using 5-shot, everything else is 0-shot.
116
+
117
+ ![figure_4_image](./figure_4_performance_vs_7b_models.jpg)
118
+ Figure 4: Performance at 7B model size
119
+
120
+ ## Long Sequence Lengths
121
+ To enable long sequence applications, we use ALiBi position embeddings and trained on 470B tokens at the context length of 2,048 followed by 157B of tokens trained at 8,192 context length. To assess BTLM’s long sequence capability, we evaluate it on SlimPajama test set with 32,768 context length and plot loss at each token position. Although ALiBi allows extrapolation in theory, 2,048 context length training alone does not extrapolate well in practice. Thankfully variable sequence length training allows for substantially improved extrapolation. BTLM-3B extrapolates well up to 10k context length but the performance degrades slightly beyond this.
122
+
123
+ ![figure_5_image](./figure_5_xentropy_with_sequence_lengths.svg)
124
+ Figure 5: BTLM-3B model's cross-entropy evaluation on the SlimPajama’s test set. Inference performed on the extrapolated sequence length of 32,768 tokens.
125
+
126
+ ## Model Details
127
+ - Developed by: [Cerebras Systems](https://www.cerebras.net/) and [Opentensor](https://opentensor.ai/) with generous support from [G42 Cloud](https://www.g42cloud.com/) and [IIAI](https://www.inceptioniai.org/en/)
128
+ - License: Apache 2.0
129
+ - Model type: Decoder-only Language Model
130
+ - Architecture: GPT-2 style architecture with SwiGLU, ALiBi, and muP
131
+ - Data set: SlimPajama-627B
132
+ - Tokenizer: Byte Pair Encoding
133
+ - Vocabulary Size: 50257
134
+ - Sequence Length: 8192
135
+ - Optimizer: AdamW
136
+ - Positional Encoding: ALiBi
137
+ - Language: English
138
+ - Learn more: [BTLM-3B-8k blog](https://www.cerebras.net/blog/btlm-3b-8k-7b-performance-in-a-3-billion-parameter-model/)
139
+ - Paper: [BTLM-3B-8K: 7B Parameter Performance in a 3B Parameter Model](https://arxiv.org/abs/2309.11568)
140
+
141
+ ## To continue training with PyTorch and Maximal Update Parameterization
142
+
143
+ ```python
144
+ from transformers import AutoModelForCausalLM
145
+ import torch
146
+
147
+ model = AutoModelForCausalLM.from_pretrained(""cerebras/btlm-3b-8k-base"", trust_remote_code=True)
148
+
149
+ # Get the parameter groups for the muP optimizer
150
+ param_groups = model.get_mup_param_groups(lr=1e-3, weight_decay=0.1)
151
+
152
+ # Set up the optimizer using AdamW with muP parameters
153
+ optimizer = torch.optim.AdamW(
154
+ param_groups,
155
+ betas=(0.9, 0.95),
156
+ eps=1e-8
157
+ )
158
+ ```
159
+
160
+ Ensure the following muP parameters are passed in your config, otherwise your model will default to standard parameterization
161
+ - `mup_width_scale: <float>`
162
+ - `mup_embeddings_scale: <float>`
163
+ - `mup_output_alpha: <float>`
164
+ - `mup_scale_qk_dot_by_d: true`
165
+
166
+ ## To extend the context length with Position Interpolation
167
+
168
+ ### During inference (without fine-tuning):
169
+ It's possible to extend the context length to 2x the training context length without degradation in performance using dynamic linear scaling. Dynamic linear scaling adjusts the slopes of ALiBi with a factor of `input_seq_len/train_seq_len` when `input_seq_len` is larger than `train_seq_len`. Check the details in our paper [Position Interpolation Improves ALiBi Extrapolation](https://arxiv.org/abs/2310.13017). To enable dynamic linear scaling, update `config.json` as follows:
170
+ ```json
171
+ # update `n_positions` with the maximum context length will be
172
+ # encountered during inference (e.g. 16384 tokens)
173
+ ""n_positions"": 16384,
174
+
175
+ # specify `train_seq_len` in `alibi_scaling` parameter
176
+ ""alibi_scaling"": {
177
+ ""type"": ""linear"",
178
+ ""train_seq_len"": 8192
179
+ }
180
+ ```
181
+
182
+ ### Using fine-tuning + position interpolation:
183
+ Performing fine-tuning with position interpolation can help achieve greater extrapolation lengths. The scaling factor should be fixed to `finetuning_seq_len/train_seq_len`. To enable fixed linear scaling, update `config.json` as follows:
184
+ ```json
185
+ # update `n_positions` with the fine-tuning context length (e.g. 32768 tokens)
186
+ ""n_positions"": 32768,
187
+
188
+ # specify the scaling `factor` in `alibi_scaling` parameter
189
+ ""alibi_scaling"": {
190
+ ""type"": ""linear"",
191
+ ""factor"": 4.0
192
+ }
193
+ ```
194
+
195
+ ## Uses and Limitations
196
+
197
+ ### Intended Use
198
+ The primary intended use is to further research into large language models. BTLM-3B-8k-base can be used as a foundation model for NLP, applications, ethics, and alignment research. We release these models with a fully permissive Apache license for the community to use freely.
199
+
200
+ You may fine-tune and adapt BTLM-3B-8k-base model via either Cerebras [Model Studio](https://www.cerebras.net/product-cloud/) or third-party libraries. Further safety-related testing and mitigations should be applied before using the BTLM-3B-8k-base in production downstream applications.
201
+
202
+ ### Out of Scope Use
203
+ BTLM-3B-8k-base was trained on SlimPajama, with primarily English language, and is not recommended for machine translation tasks. BTLM-3B-8k-base has not been tuned for instruction-following or chat-based use cases.
204
+
205
+ ### Generation Samples
206
+ ```
207
+ Prompt: ""What is a manometer?""
208
+ Generation: ""A manometric device is an instrument that measures the pressure of a fluid.""
209
+ ```
210
+
211
+ ```
212
+ Prompt: ""What Robert Oppenheimer is famous for?""
213
+ Generation: “Robert Opperman was a physicist who was the director of the Manhattan Project, which was responsible for the development of nuclear weapons.”
214
+ ```
215
+
216
+ ```
217
+ Prompt: ""What was Albert Einstein known for?""
218
+ Generation: “Albert Einstein was a German-born theoretical physicist who developed the theory of relativity, one of the two pillars of modern physics (alongside quantum mechanics).”
219
+ ```
220
+
221
+ ```
222
+ Prompt: “Who is Isaac Newton?”
223
+ Generation: “Isaac Newton was an English physicist, mathematician, astronomer, alchemist, and theologian. He is widely recognized as one of the most influential scientists of all time and as a key figure in the scientific revolution.”
224
+ ```
225
+
226
+ ```
227
+ Prompt: ""Where does the blood get pumped to after it leaves the right ventricle?""
228
+ Generation: “The right atrium receives the deoxygenated blood from the pulmonary artery. The right side of the heart is the only side that receives de-oxygened...”
229
+ ```
230
+
231
+ ```
232
+ Prompt: ""Peter lost his wallet after attending a local concert. To help him, Bob gave David 10 dollars. David then passed it on to Peter. Now what is the total amount of money Peter has?""
233
+ Generation: “A. $10”
234
+ ```
235
+
236
+ ## Risk, Bias, Ethical Considerations
237
+ - **Human life:** The outputs from this model may or may not align with human values. The risk needs to be thoroughly investigated before deploying this model in a production environment where it can directly impact human life.
238
+ - **Risks and harms:** There may be distributional bias in the [RedPajama dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T) that can manifest in various forms in the downstream model deployment. There are other risks associated with large language models such as amplifying stereotypes, memorizing training data, or revealing private or secure information.
239
+
240
+ ## Acknowledgements
241
+ We are thankful to all Cerebras engineers that made this work possible.
242
+
243
+ We would like to acknowledge the generous support of G42 Cloud and the Inception Institute of Artificial Intelligence for providing compute time on Condor Galaxy 1.","{""id"": ""cerebras/btlm-3b-8k-base"", ""author"": ""cerebras"", ""sha"": ""2f325501c4db6464d4fe03c84c3a394197865690"", ""last_modified"": ""2023-10-23 14:45:35+00:00"", ""created_at"": ""2023-07-14 19:01:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2157, ""downloads_all_time"": null, ""likes"": 262, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""btlm"", ""text-generation"", ""causal-lm"", ""Cerebras"", ""BTLM"", ""custom_code"", ""en"", ""dataset:cerebras/SlimPajama-627B"", ""arxiv:2304.03208"", ""arxiv:2002.05202"", ""arxiv:2108.12409"", ""arxiv:2203.03466"", ""arxiv:2309.11568"", ""arxiv:2310.13017"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- cerebras/SlimPajama-627B\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- pytorch\n- causal-lm\n- Cerebras\n- BTLM\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""BTLMLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_btlm.BTLMConfig"", ""AutoModel"": ""modeling_btlm.BTLMModel"", ""AutoModelForCausalLM"": ""modeling_btlm.BTLMLMHeadModel"", ""AutoModelForQuestionAnswering"": ""modeling_btlm.BTLMForQuestionAnswering"", ""AutoModelForSequenceClassification"": ""modeling_btlm.BTLMForSequenceClassification"", ""AutoModelForTokenClassification"": ""modeling_btlm.BTLMForTokenClassification""}, 
""model_type"": ""btlm"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_btlm.BTLMLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_btlm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_1_memory_footprint.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_2_half_the_size_twice_the_speed.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_3_performance_vs_3b_models.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_4_performance_vs_7b_models.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_5_xentropy_with_sequence_lengths.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figure_5_xentropy_with_sequence_lengths.svg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_btlm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='table_1_downstream_performance_3b.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='table_2_downstream_performance_7b.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Sharathhebbar24/One-stop-for-Open-source-models"", ""ka1kuk/litellm"", ""nonhuman/nnnn"", ""kenken999/litellm"", ""kenken999/litellmlope"", ""K00B404/One-stop-till-you-drop"", ""Dovakiins/qwerrwe"", ""milk639/btlMiel"", ""milk639/3DaHardWay"", ""Solobrad/First_agent_template""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-23 14:45:35+00:00"", ""cardData"": ""datasets:\n- cerebras/SlimPajama-627B\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- pytorch\n- causal-lm\n- Cerebras\n- BTLM\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_btlm.BTLMLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64b19b777da6a1dca85bb76d"", ""modelId"": ""cerebras/btlm-3b-8k-base"", ""usedStorage"": 29125070664}",0,,0,https://huggingface.co/btqa/btqa-base,1,,0,,0,"Dovakiins/qwerrwe, K00B404/One-stop-till-you-drop, Sharathhebbar24/One-stop-for-Open-source-models, Solobrad/First_agent_template, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=cerebras/btlm-3b-8k-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcerebras%2Fbtlm-3b-8k-base%5D(%2Fcerebras%2Fbtlm-3b-8k-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ka1kuk/litellm, kenken999/litellm, kenken999/litellmlope, milk639/3DaHardWay, milk639/btlMiel, nonhuman/nnnn",12
chatglm2-6b-int4_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ THUDM/chatglm2-6b-int4,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - glm
8
+ - chatglm
9
+ - thudm
10
+ ---
11
+ # ChatGLM2-6B
12
+ <p align=""center"">
13
+ 💻 <a href=""https://github.com/THUDM/ChatGLM2-6B"" target=""_blank"">Github Repo</a> • 🐦 <a href=""https://twitter.com/thukeg"" target=""_blank"">Twitter</a> • 📃 <a href=""https://arxiv.org/abs/2103.10360"" target=""_blank"">[GLM@ACL 22]</a> <a href=""https://github.com/THUDM/GLM"" target=""_blank"">[GitHub]</a> • 📃 <a href=""https://arxiv.org/abs/2210.02414"" target=""_blank"">[GLM-130B@ICLR 23]</a> <a href=""https://github.com/THUDM/GLM-130B"" target=""_blank"">[GitHub]</a> <br>
14
+ </p>
15
+
16
+ <p align=""center"">
17
+ 👋 Join our <a href=""https://join.slack.com/t/chatglm/shared_invite/zt-1y7pqoloy-9b1g6T6JjA8J0KxvUjbwJw"" target=""_blank"">Slack</a> and <a href=""https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md"" target=""_blank"">WeChat</a>
18
+ </p>
19
+
20
+ ## 介绍
21
+ ChatGLM**2**-6B 是开源中英双语对话模型 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM**2**-6B 引入了如下新特性:
22
+
23
+ 1. **更强大的性能**:基于 ChatGLM 初代模型的开发经验,我们全面升级了 ChatGLM2-6B 的基座模型。ChatGLM2-6B 使用了 [GLM](https://github.com/THUDM/GLM) 的混合目标函数,经过了 1.4T 中英标识符的预训练与人类偏好对齐训练,[评测结果](#评测结果)显示,相比于初代模型,ChatGLM2-6B 在 MMLU(+23%)、CEval(+33%)、GSM8K(+571%) 、BBH(+60%)等数据集上的性能取得了大幅度的提升,在同尺寸开源模型中具有较强的竞争力。
24
+ 2. **更长的上下文**:基于 [FlashAttention](https://github.com/HazyResearch/flash-attention) 技术,我们将基座模型的上下文长度(Context Length)由 ChatGLM-6B 的 2K 扩展到了 32K,并在对话阶段使用 8K 的上下文长度训练,允许更多轮次的对话。但当前版本的 ChatGLM2-6B 对单轮超长文档的理解能力有限,我们会在后续迭代升级中着重进行优化。
25
+ 3. **更高效的推理**:基于 [Multi-Query Attention](http://arxiv.org/abs/1911.02150) 技术,ChatGLM2-6B 有更高效的推理速度和更低的显存占用:在官方的模型实现下,推理速度相比初代提升了 42%,INT4 量化下,6G 显存支持的对话长度由 1K 提升到了 8K。
26
+
27
+ ChatGLM**2**-6B is the second-generation version of the open-source bilingual (Chinese-English) chat model [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B). It retains the smooth conversation flow and low deployment threshold of the first-generation model, while introducing the following new features:
28
+
29
+ 1. **Stronger Performance**: Based on the development experience of the first-generation ChatGLM model, we have fully upgraded the base model of ChatGLM2-6B. ChatGLM2-6B uses the hybrid objective function of [GLM](https://github.com/THUDM/GLM), and has undergone pre-training with 1.4T bilingual tokens and human preference alignment training. The [evaluation results](README.md#evaluation-results) show that, compared to the first-generation model, ChatGLM2-6B has achieved substantial improvements in performance on datasets like MMLU (+23%), CEval (+33%), GSM8K (+571%), BBH (+60%), showing strong competitiveness among models of the same size.
30
+ 2. **Longer Context**: Based on [FlashAttention](https://github.com/HazyResearch/flash-attention) technique, we have extended the context length of the base model from 2K in ChatGLM-6B to 32K, and trained with a context length of 8K during the dialogue alignment, allowing for more rounds of dialogue. However, the current version of ChatGLM2-6B has limited understanding of single-round ultra-long documents, which we will focus on optimizing in future iterations.
31
+ 3. **More Efficient Inference**: Based on [Multi-Query Attention](http://arxiv.org/abs/1911.02150) technique, ChatGLM2-6B has more efficient inference speed and lower GPU memory usage: under the official implementation, the inference speed has increased by 42% compared to the first generation; under INT4 quantization, the dialogue length supported by 6G GPU memory has increased from 1K to 8K.
32
+
33
+ ## 软件依赖
34
+
35
+ ```shell
36
+ pip install protobuf transformers==4.30.2 cpm_kernels torch>=2.0 gradio mdtex2html sentencepiece accelerate
37
+ ```
38
+
39
+ ## 代码调用
40
+
41
+ 可以通过如下代码调用 ChatGLM-6B 模型来生成对话:
42
+
43
+ ```ipython
44
+ >>> from transformers import AutoTokenizer, AutoModel
45
+ >>> tokenizer = AutoTokenizer.from_pretrained(""THUDM/chatglm2-6b-int4"", trust_remote_code=True)
46
+ >>> model = AutoModel.from_pretrained(""THUDM/chatglm2-6b-int4"", trust_remote_code=True).half().cuda()
47
+ >>> model = model.eval()
48
+ >>> response, history = model.chat(tokenizer, ""你好"", history=[])
49
+ >>> print(response)
50
+ 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
51
+ >>> response, history = model.chat(tokenizer, ""晚上睡不着应该怎么办"", history=history)
52
+ >>> print(response)
53
+ 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:
54
+
55
+ 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
56
+ 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
57
+ 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
58
+ 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
59
+ 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
60
+ 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。
61
+
62
+ 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
63
+ ```
64
+
65
+ 关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM2-6B)。
66
+
67
+ For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM2-6B).
68
+
69
+ ## Change Log
70
+ * v1.0
71
+
72
+ ## 协议
73
+
74
+ 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM2-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。
75
+
76
+ ## 引用
77
+
78
+ 如果你觉得我们的工作有帮助的话,请考虑引用下列论文。
79
+
80
+ If you find our work helpful, please consider citing the following paper.
81
+
82
+ ```
83
+ @misc{glm2024chatglm,
84
+ title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools},
85
+ author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang},
86
+ year={2024},
87
+ eprint={2406.12793},
88
+ archivePrefix={arXiv},
89
+ primaryClass={id='cs.CL' full_name='Computation and Language' is_active=True alt_name='cmp-lg' in_archive='cs' is_general=False description='Covers natural language processing. Roughly includes material in ACM Subject Class I.2.7. Note that work on artificial languages (programming languages, logics, formal systems) that does not explicitly address natural-language issues broadly construed (natural-language processing, computational linguistics, speech, text retrieval, etc.) is not appropriate for this area.'}
90
+ }
91
+ ```","{""id"": ""THUDM/chatglm2-6b-int4"", ""author"": ""THUDM"", ""sha"": ""a954550736dda16d022a7019b9ffecd753aa1b84"", ""last_modified"": ""2024-12-29 04:03:28+00:00"", ""created_at"": ""2023-06-25 12:46:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1892, ""downloads_all_time"": null, ""likes"": 236, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""chatglm"", ""glm"", ""thudm"", ""custom_code"", ""zh"", ""en"", ""arxiv:2103.10360"", ""arxiv:2210.02414"", ""arxiv:1911.02150"", ""arxiv:2406.12793"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""widget_data"": null, ""model_index"": null, ""config"": {""model_type"": ""chatglm"", ""architectures"": [""ChatGLMModel""], ""auto_map"": {""AutoConfig"": ""configuration_chatglm.ChatGLMConfig"", ""AutoModel"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSeq2SeqLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration""}, ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='MODEL_LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantization.py', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='tokenization_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""qingxu98/gpt-academic"", ""mikeee/chatglm2-6b-4bit"", ""kevinwang676/ChatGLM2-SadTalker-VC"", ""kevinwang676/ChatGLM2-VC-SadTalker"", ""fb700/chatglm-fitness-RLHF"", ""kevinwang676/FreeVC"", ""glt3953/app-text_generation_chatglm2-6b"", ""bhaskartripathi/Llama-2-70b-chatbot"", ""justest/chatglm2-6b-int4"", ""ysharma/chatglm2-6b-4bit"", ""mikeee/gradio-chatinterface"", ""Pravincoder/Llama2_chat"", ""syx948/ChatPDF"", ""kevinwang676/FreeVC-en"", ""ZenXir/FreeVC"", ""justest/GPT-Academic-with-B3n-AI"", ""Fayette/ChatWithFayette"", ""hzwluoye/gpt-academic"", ""yuping322/ChatGLM2-VC-SadTalker"", ""Lindia/ChatWithRua"", ""zjuzjw/gpt-academic"", ""withoutfire5927/aiDoggie"", ""carolnc/gpt-academic"", ""everr/gpt-academic111"", ""Yuan2006/gpt-academic-siliconflow-free"", ""XiaoQiu01/test-run"", ""Not-Grim-Refer/Falcoder-7b-Extended-Code-Generator"", ""Robinsh2023/chatglm2-6b-4bit"", ""mikeee/chatglm2-6b-test"", ""Alfasign/chatglm2-6b-4bit"", ""tradequant/chat"", ""shawnqdd/chatGLM_demo"", ""EinfachOlder/chatglm2-6b-4bit"", ""JethroNatividad/chatglm2-6b-4bit"", ""Gimhoy/chat"", ""skoch/test"", ""ATForest/english"", ""zfwcpc/ChatGLM2-SadTalker-VC"", ""AIlexDev/chatglm2-6b-4bit"", ""xiangxiang/chatglm2-6b"", ""Jafta/chatglm2-6b-4bit"", ""XuBailing/CongMa2"", ""vitalyr/gpt-academic"", ""pony123/ChatGLM2-Voice-Cloning"", ""suexcxine/mychatbot2"", ""zivzhao/ChatGLM2-SadTalker-VC"", ""long0101/gpt-academic"", ""cming0420/gpt-academic"", ""hushhhhovo/gpt-academic"", ""hengkai/gpt-academic"", ""xuke225/gpt_ac"", ""LittleOtter777/Chatwithme"", ""kuxian/gpt-academic"", ""QLWD/gpt-academic"", ""qinglin96/gpt-academic3.6"", ""DrBadass/gpt-academic"", ""darren1231/gpt-academic_2"", ""xiongfly/gpt-ac"", 
""Leachim/gpt-academic-old"", ""bacim/chatglm"", ""CaiRou-Huang/gpt-academic-test"", ""DuanSuKa/gpt-academic2"", ""Leachim/gpt-academic"", ""BuzzHr/gpt-academic002"", ""indig0child/RyangBot"", ""durukan/gptacademic"", ""Ayndpa/gpt-academic"", ""everr/gpt-academicrrrr"", ""Kevinlidk/gpt-academic"", ""xiaohua1011/gpt-academic"", ""Cyburger/die"", ""zhaomuqing/gpt-academic"", ""zjf199249/gpt"", ""ruihua0124/ChatGLM2-SadTalker"", ""shanmin/gpt-academic"", ""SincoMao/test"", ""zhlinh/gpt-academic"", ""MinatoAquin33/gpt-academic"", ""Guoyccc/gpt-academic-v3"", ""qfeng/ZKY-GPT3.48"", ""DuanSuKa/gpt-academic1"", ""forever-yu/gpt-academic"", ""zhizhuo/gpt-academic"", ""yezia/ChatGLM2-SadTalker-VC"", ""BuzzHr/gpt-academic001"", ""ayaowei/gpt-academic"", ""hieroglyphs/gpt-academic"", ""adminstr/gpt-academic"", ""PacBio/BioTree_gpt-academic"", ""zhaisilong/gpt-academic"", ""supercyx3/academic"", ""Cran-May/yugangIV"", ""Cran-May/yugangIV-B"", ""hieroglyphs/AI-tour-guide"", ""JhonSakib/gpt-academic"", ""JACK-Chen/gpt-academic-private"", ""himoral/gpt-academic349"", ""Cran-May/chatglm2-6b-4bit"", ""pyrroleach/gpt-academic-3.49-public"", ""justseemore/gpt-academic""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-29 04:03:28+00:00"", ""cardData"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6498371e07e994b4af4f8c41"", ""modelId"": ""THUDM/chatglm2-6b-int4"", ""usedStorage"": 7848373823}",0,,0,,0,,0,,0,"Alfasign/chatglm2-6b-4bit, Pravincoder/Llama2_chat, bhaskartripathi/Llama-2-70b-chatbot, fb700/chatglm-fitness-RLHF, glt3953/app-text_generation_chatglm2-6b, 
huggingface/InferenceSupport/discussions/new?title=THUDM/chatglm2-6b-int4&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTHUDM%2Fchatglm2-6b-int4%5D(%2FTHUDM%2Fchatglm2-6b-int4)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, justest/chatglm2-6b-int4, kevinwang676/ChatGLM2-SadTalker-VC, kevinwang676/ChatGLM2-VC-SadTalker, kevinwang676/FreeVC, mikeee/chatglm2-6b-4bit, qingxu98/gpt-academic, ysharma/chatglm2-6b-4bit",13
chatglm2-6b_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ THUDM/chatglm2-6b,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - glm
8
+ - chatglm
9
+ - thudm
10
+ ---
11
+ # ChatGLM2-6B
12
+ <p align=""center"">
13
+ 💻 <a href=""https://github.com/THUDM/ChatGLM2-6B"" target=""_blank"">Github Repo</a> • 🐦 <a href=""https://twitter.com/thukeg"" target=""_blank"">Twitter</a> • 📃 <a href=""https://arxiv.org/abs/2103.10360"" target=""_blank"">[GLM@ACL 22]</a> <a href=""https://github.com/THUDM/GLM"" target=""_blank"">[GitHub]</a> • 📃 <a href=""https://arxiv.org/abs/2210.02414"" target=""_blank"">[GLM-130B@ICLR 23]</a> <a href=""https://github.com/THUDM/GLM-130B"" target=""_blank"">[GitHub]</a> <br>
14
+ </p>
15
+
16
+ <p align=""center"">
17
+ 👋 Join our <a href=""https://join.slack.com/t/chatglm/shared_invite/zt-1y7pqoloy-9b1g6T6JjA8J0KxvUjbwJw"" target=""_blank"">Slack</a> and <a href=""https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md"" target=""_blank"">WeChat</a>
18
+ </p>
19
+ <p align=""center"">
20
+ 📍Experience the larger-scale ChatGLM model at <a href=""https://www.chatglm.cn"">chatglm.cn</a>
21
+ </p>
22
+
23
+ ## 介绍
24
+ ChatGLM**2**-6B 是开源中英双语对话模型 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM**2**-6B 引入了如下新特性:
25
+
26
+ 1. **更强大的性能**:基于 ChatGLM 初代模型的开发经验,我们全面升级了 ChatGLM2-6B 的基座模型。ChatGLM2-6B 使用了 [GLM](https://github.com/THUDM/GLM) 的混合目标函数,经过了 1.4T 中英标识符的预训练与人类偏好对齐训练,[评测结果](#评测结果)显示,相比于初代模型,ChatGLM2-6B 在 MMLU(+23%)、CEval(+33%)、GSM8K(+571%) 、BBH(+60%)等数据集上的性能取得了大幅度的提升,在同尺寸开源模型中具有较强的竞争力。
27
+ 2. **更长的上下文**:基于 [FlashAttention](https://github.com/HazyResearch/flash-attention) 技术,我们将基座模型的上下文长度(Context Length)由 ChatGLM-6B 的 2K 扩展到了 32K,并在对话阶段使用 8K 的上下文长度训练,允许更多轮次的对话。但当前版本的 ChatGLM2-6B 对单轮超长文档的理解能力有限,我们会在后续迭代升级中着重进行优化。
28
+ 3. **更高效的推理**:基于 [Multi-Query Attention](http://arxiv.org/abs/1911.02150) 技术,ChatGLM2-6B 有更高效的推理速度和更低的显存占用:在官方的模型实现下,推理速度相比初代提升了 42%,INT4 量化下,6G 显存支持的对话长度由 1K 提升到了 8K。
29
+ 4. **更开放的协议**:ChatGLM2-6B 权重对学术研究**完全开放**,在填写[问卷](https://open.bigmodel.cn/mla/form)进行登记后**亦允许免费商业使用**。
30
+
31
+ ChatGLM**2**-6B is the second-generation version of the open-source bilingual (Chinese-English) chat model [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B). It retains the smooth conversation flow and low deployment threshold of the first-generation model, while introducing the following new features:
32
+
33
+ 1. **Stronger Performance**: Based on the development experience of the first-generation ChatGLM model, we have fully upgraded the base model of ChatGLM2-6B. ChatGLM2-6B uses the hybrid objective function of [GLM](https://github.com/THUDM/GLM), and has undergone pre-training with 1.4T bilingual tokens and human preference alignment training. The [evaluation results](README.md#evaluation-results) show that, compared to the first-generation model, ChatGLM2-6B has achieved substantial improvements in performance on datasets like MMLU (+23%), CEval (+33%), GSM8K (+571%), BBH (+60%), showing strong competitiveness among models of the same size.
34
+ 2. **Longer Context**: Based on [FlashAttention](https://github.com/HazyResearch/flash-attention) technique, we have extended the context length of the base model from 2K in ChatGLM-6B to 32K, and trained with a context length of 8K during the dialogue alignment, allowing for more rounds of dialogue. However, the current version of ChatGLM2-6B has limited understanding of single-round ultra-long documents, which we will focus on optimizing in future iterations.
35
+ 3. **More Efficient Inference**: Based on [Multi-Query Attention](http://arxiv.org/abs/1911.02150) technique, ChatGLM2-6B has more efficient inference speed and lower GPU memory usage: under the official implementation, the inference speed has increased by 42% compared to the first generation; under INT4 quantization, the dialogue length supported by 6G GPU memory has increased from 1K to 8K.
36
+ 4. **More Open License**: ChatGLM2-6B weights are **completely open** for academic research, and **free commercial use** is also allowed after completing the [questionnaire](https://open.bigmodel.cn/mla/form).
37
+
38
+ ## 软件依赖
39
+
40
+ ```shell
41
+ pip install protobuf transformers==4.30.2 cpm_kernels torch>=2.0 gradio mdtex2html sentencepiece accelerate
42
+ ```
43
+
44
+ ## 代码调用
45
+
46
+ 可以通过如下代码调用 ChatGLM-6B 模型来生成对话:
47
+
48
+ ```ipython
49
+ >>> from transformers import AutoTokenizer, AutoModel
50
+ >>> tokenizer = AutoTokenizer.from_pretrained(""THUDM/chatglm2-6b"", trust_remote_code=True)
51
+ >>> model = AutoModel.from_pretrained(""THUDM/chatglm2-6b"", trust_remote_code=True).half().cuda()
52
+ >>> model = model.eval()
53
+ >>> response, history = model.chat(tokenizer, ""你好"", history=[])
54
+ >>> print(response)
55
+ 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
56
+ >>> response, history = model.chat(tokenizer, ""晚上睡不着应该怎么办"", history=history)
57
+ >>> print(response)
58
+ 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:
59
+
60
+ 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
61
+ 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
62
+ 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
63
+ 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
64
+ 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
65
+ 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。
66
+
67
+ 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
68
+ ```
69
+
70
+ 关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM2-6B)。
71
+
72
+ For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM2-6B).
73
+
74
+ ## Change Log
75
+ * v1.0
76
+
77
+ ## 协议
78
+
79
+ 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM2-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。
80
+
81
+ ## 引用
82
+
83
+ 如果你觉得我们的工作有帮助的话,请考虑引用下列论文。
84
+
85
+ If you find our work helpful, please consider citing the following paper.
86
+
87
+ ```
88
+ @misc{glm2024chatglm,
89
+ title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools},
90
+ author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang},
91
+ year={2024},
92
+ eprint={2406.12793},
93
+ archivePrefix={arXiv},
94
+ primaryClass={id='cs.CL' full_name='Computation and Language' is_active=True alt_name='cmp-lg' in_archive='cs' is_general=False description='Covers natural language processing. Roughly includes material in ACM Subject Class I.2.7. Note that work on artificial languages (programming languages, logics, formal systems) that does not explicitly address natural-language issues broadly construed (natural-language processing, computational linguistics, speech, text retrieval, etc.) is not appropriate for this area.'}
95
+ }
96
+ ```","{""id"": ""THUDM/chatglm2-6b"", ""author"": ""THUDM"", ""sha"": ""d2e2d91789248536a747d9ce60642a336444186c"", ""last_modified"": ""2024-08-04 08:41:38+00:00"", ""created_at"": ""2023-06-24 16:26:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 711023, ""downloads_all_time"": null, ""likes"": 2050, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""chatglm"", ""glm"", ""thudm"", ""custom_code"", ""zh"", ""en"", ""arxiv:2103.10360"", ""arxiv:2210.02414"", ""arxiv:1911.02150"", ""arxiv:2406.12793"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""widget_data"": null, ""model_index"": null, ""config"": {""model_type"": ""chatglm"", ""architectures"": [""ChatGLMModel""], ""auto_map"": {""AutoConfig"": ""configuration_chatglm.ChatGLMConfig"", ""AutoModel"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForCausalLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSeq2SeqLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSequenceClassification"": ""modeling_chatglm.ChatGLMForSequenceClassification""}, ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='MODEL_LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_chatglm.py', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantization.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""qingxu98/gpt-academic"", ""eduagarcia/open_pt_llm_leaderboard"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""mikeee/chatglm2-6b-4bit"", ""gsaivinay/open_llm_leaderboard"", ""kevinwang676/ChatGLM2-SadTalker-VC"", ""kevinwang676/ChatGLM2-VC-SadTalker"", ""fb700/chatglm-fitness-RLHF"", ""dahaoGPT/ChatGLM2-6B-chatbot"", ""meval/multilingual-chatbot-arena-leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""EmbeddedLLM/chat-template-generation"", ""Justinrune/LLaMA-Factory"", ""yhavinga/dutch-tokenizer-arena"", ""kevinwang676/FreeVC"", ""glt3953/app-text_generation_chatglm2-6b"", ""kenken999/fastapi_django_main_live"", ""kz-transformers/kaz-llm-lb"", ""internlm/internlm3-8b-instruct"", ""felixz/open_llm_leaderboard"", ""officialhimanshu595/llama-factory"", 
""OPTML-Group/UnlearnCanvas-Benchmark"", ""li-qing/FIRE"", ""BAAI/open_flageval_vlm_leaderboard"", ""ChadWong/CharttGLM2-6B"", ""ysharma/chatglm2-6b-4bit"", ""b1sheng/kg_llm_leaderboard_test"", ""kevinwang676/ChatGLM2-Bert-VITS2-lh"", ""syx948/ChatPDF"", ""Zulelee/langchain-chatchat"", ""neubla/neubla-llm-evaluation-board"", ""xu-song/kplug"", ""coo7/dddeepseek"", ""TongkunGuan/Token-level_Text_Image_Foundation_Model"", ""yunfei0710/gpt-academic"", ""ShiwenNi/gpt-academic"", ""zhanghaohui/szu-gpt-academic"", ""kevinwang676/FreeVC-en"", ""kevinwang676/ChatGLM2-SadTalker"", ""Guochun/THUDM-chatglm2-6b"", ""dahaoGPT/THUDM-chatglm2-6b"", ""ZenXir/FreeVC"", ""foghuang/ChatGLM2-6B"", ""justest/GPT-Academic-with-B3n-AI"", ""kevinwang676/ChatGLM2-linghua1"", ""kevinwang676/ChatGLM-Bert-VITS2"", ""hzwluoye/gpt-academic"", ""yuping322/ChatGLM2-VC-SadTalker"", ""tianleliphoebe/visual-arena"", ""zjuzjw/gpt-academic"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""carolnc/gpt-academic"", ""Ashmal/MobiLlama"", ""everr/gpt-academic111"", ""Yuan2006/gpt-academic-siliconflow-free"", ""Nerva5678/Excel-QA-bot"", ""okliqiang/course"", ""Not-Grim-Refer/Falcoder-7b-Extended-Code-Generator"", ""XiaoQiu01/test-run"", ""jiazun/gpt-academic-new"", ""Robinsh2023/chatglm2-6b-4bit"", ""thenHung/demo_chatbot_streamlit"", ""leexiaoyao/gpt-academic-16k"", ""sheldonlalala/gpt-academic"", ""Alfasign/chatglm2-6b-4bit"", ""tradequant/chat"", ""Lucosin/gpt-academic-latex_public"", ""Hikafeng/gpt-academic"", ""shawnqdd/chatGLM_demo"", ""EinfachOlder/chatglm2-6b-4bit"", ""JethroNatividad/chatglm2-6b-4bit"", ""yiguangzhou/chatglm2-6b"", ""peter0113/chat-demo"", ""lllhjf/gpt-academic"", ""himoral/gpt-academic3333"", ""XuBailing/CongMa"", ""mcphur/gpt-academic"", ""watebac/gpt"", ""alexshengzhili/calahealthgpt"", ""Gimhoy/chat"", ""skoch/test"", ""ATForest/english"", ""hiwei/chatglm2-6b-explorer"", ""pyrroleach/gpt-academic-3.42-public"", 
""Sonfil/THUDM-chatglm2-6b"", ""zfwcpc/ChatGLM2-SadTalker-VC"", ""Ralpher1/THUDM-chatglm2-6b"", ""iceliu/gpt-web"", ""AIlexDev/chatglm2-6b-4bit"", ""xinczhang/gpt-academic"", ""Sevenlee/chatglm2"", ""Jafta/chatglm2-6b-4bit"", ""ZhangXinClas/gpt-academic"", ""yiguangzhou/mychatglm2-6b"", ""XuBailing/CongMa2"", ""vitalyr/gpt-academic"", ""pony123/ChatGLM2-Voice-Cloning""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-04 08:41:38+00:00"", ""cardData"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""64971933a9c42bc6a848d3f4"", ""modelId"": ""THUDM/chatglm2-6b"", ""usedStorage"": 49949968758}",0,,0,"https://huggingface.co/XiaoFang1019/chatglm2-6b_298_v2, https://huggingface.co/xiaoqijun/111, https://huggingface.co/kobesar/FinGPT_Training_LoRA_with_ChatGLM2_6B, https://huggingface.co/XiaoFang1019/chatglm2-6b_298_v1, https://huggingface.co/gkMSDA/PEFTAdapterWeightsTest, https://huggingface.co/wangd12/fin_sen_test",6,https://huggingface.co/Slipstream-Max/MeChat-chatglm2-6b-GGUF-fp16,1,,0,"BAAI/open_cn_llm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, Justinrune/LLaMA-Factory, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, eduagarcia/open_pt_llm_leaderboard, glt3953/app-text_generation_chatglm2-6b, huggingface/InferenceSupport/discussions/new?title=THUDM/chatglm2-6b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTHUDM%2Fchatglm2-6b%5D(%2FTHUDM%2Fchatglm2-6b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, meval/multilingual-chatbot-arena-leaderboard, qingxu98/gpt-academic, yhavinga/dutch-tokenizer-arena",13
deepseek-llm-7b-chat_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
distilbert-base-uncased-finetuned-sst-2-english_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv ADDED
The diff for this file is too large to render. See raw diff
 
distilroberta-finetuned-financial-news-sentiment-analysis_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
The diff for this file is too large to render. See raw diff
 
dpt-large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Intel/dpt-large,"---
3
+ license: apache-2.0
4
+ tags:
5
+ - vision
6
+ - depth-estimation
7
+ widget:
8
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
9
+ example_title: Tiger
10
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
11
+ example_title: Teapot
12
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
13
+ example_title: Palace
14
+
15
+ model-index:
16
+ - name: dpt-large
17
+ results:
18
+ - task:
19
+ type: monocular-depth-estimation
20
+ name: Monocular Depth Estimation
21
+ dataset:
22
+ type: MIX-6
23
+ name: MIX-6
24
+ metrics:
25
+ - type: Zero-shot transfer
26
+ value: 10.82
27
+ name: Zero-shot transfer
28
+ config: Zero-shot transfer
29
+ verified: false
30
+ ---
31
+
32
+ ## Model Details: DPT-Large (also known as MiDaS 3.0)
33
+
34
+ Dense Prediction Transformer (DPT) model trained on 1.4 million images for monocular depth estimation.
35
+ It was introduced in the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by Ranftl et al. (2021) and first released in [this repository](https://github.com/isl-org/DPT).
36
+ DPT uses the Vision Transformer (ViT) as backbone and adds a neck + head on top for monocular depth estimation.
37
+ ![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dpt_architecture.jpg)
38
+
39
+ The model card has been written in combination by the Hugging Face team and Intel.
40
+
41
+ | Model Detail | Description |
42
+ | ----------- | ----------- |
43
+ | Model Authors - Company | Intel |
44
+ | Date | March 22, 2022 |
45
+ | Version | 1 |
46
+ | Type | Computer Vision - Monocular Depth Estimation |
47
+ | Paper or Other Resources | [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) and [GitHub Repo](https://github.com/isl-org/DPT) |
48
+ | License | Apache 2.0 |
49
+ | Questions or Comments | [Community Tab](https://huggingface.co/Intel/dpt-large/discussions) and [Intel Developers Discord](https://discord.gg/rv2Gp55UJQ)|
50
+
51
+ | Intended Use | Description |
52
+ | ----------- | ----------- |
53
+ | Primary intended uses | You can use the raw model for zero-shot monocular depth estimation. See the [model hub](https://huggingface.co/models?search=dpt) to look for fine-tuned versions on a task that interests you. |
54
+ | Primary intended users | Anyone doing monocular depth estimation |
55
+ | Out-of-scope uses | This model in most cases will need to be fine-tuned for your particular task. The model should not be used to intentionally create hostile or alienating environments for people.|
56
+
57
+
58
+ ### How to use
59
+
60
+ The easiest is leveraging the pipeline API:
61
+
62
+ ```
63
+ from transformers import pipeline
64
+
65
+ pipe = pipeline(task=""depth-estimation"", model=""Intel/dpt-large"")
66
+ result = pipe(image)
67
+ result[""depth""]
68
+ ```
69
+
70
+ In case you want to implement the entire logic yourself, here's how to do that for zero-shot depth estimation on an image:
71
+
72
+ ```python
73
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
74
+ import torch
75
+ import numpy as np
76
+ from PIL import Image
77
+ import requests
78
+
79
+ url = ""http://images.cocodataset.org/val2017/000000039769.jpg""
80
+ image = Image.open(requests.get(url, stream=True).raw)
81
+
82
+ processor = DPTImageProcessor.from_pretrained(""Intel/dpt-large"")
83
+ model = DPTForDepthEstimation.from_pretrained(""Intel/dpt-large"")
84
+
85
+ # prepare image for the model
86
+ inputs = processor(images=image, return_tensors=""pt"")
87
+
88
+ with torch.no_grad():
89
+ outputs = model(**inputs)
90
+ predicted_depth = outputs.predicted_depth
91
+
92
+ # interpolate to original size
93
+ prediction = torch.nn.functional.interpolate(
94
+ predicted_depth.unsqueeze(1),
95
+ size=image.size[::-1],
96
+ mode=""bicubic"",
97
+ align_corners=False,
98
+ )
99
+
100
+ # visualize the prediction
101
+ output = prediction.squeeze().cpu().numpy()
102
+ formatted = (output * 255 / np.max(output)).astype(""uint8"")
103
+ depth = Image.fromarray(formatted)
104
+ ```
105
+
106
+ For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/dpt).
107
+
108
+
109
+ | Factors | Description |
110
+ | ----------- | ----------- |
111
+ | Groups | Multiple datasets compiled together |
112
+ | Instrumentation | - |
113
+ | Environment | Inference completed on Intel Xeon Platinum 8280 CPU @ 2.70GHz with 8 physical cores and an NVIDIA RTX 2080 GPU. |
114
+ | Card Prompts | Model deployment on alternate hardware and software will change model performance |
115
+
116
+ | Metrics | Description |
117
+ | ----------- | ----------- |
118
+ | Model performance measures | Zero-shot Transfer |
119
+ | Decision thresholds | - |
120
+ | Approaches to uncertainty and variability | - |
121
+
122
+ | Training and Evaluation Data | Description |
123
+ | ----------- | ----------- |
124
+ | Datasets | The dataset is called MIX 6, and contains around 1.4M images. The model was initialized with ImageNet-pretrained weights.|
125
+ | Motivation | To build a robust monocular depth prediction network |
126
+ | Preprocessing | ""We resize the image such that the longer side is 384 pixels and train on random square crops of size 384. ... We perform random horizontal flips for data augmentation."" See [Ranftl et al. (2021)](https://arxiv.org/abs/2103.13413) for more details. |
127
+
128
+ ## Quantitative Analyses
129
+ | Model | Training set | DIW WHDR | ETH3D AbsRel | Sintel AbsRel | KITTI δ>1.25 | NYU δ>1.25 | TUM δ>1.25 |
130
+ | --- | --- | --- | --- | --- | --- | --- | --- |
131
+ | DPT - Large | MIX 6 | 10.82 (-13.2%) | 0.089 (-31.2%) | 0.270 (-17.5%) | 8.46 (-64.6%) | 8.32 (-12.9%) | 9.97 (-30.3%) |
132
+ | DPT - Hybrid | MIX 6 | 11.06 (-11.2%) | 0.093 (-27.6%) | 0.274 (-16.2%) | 11.56 (-51.6%) | 8.69 (-9.0%) | 10.89 (-23.2%) |
133
+ | MiDaS | MIX 6 | 12.95 (+3.9%) | 0.116 (-10.5%) | 0.329 (+0.5%) | 16.08 (-32.7%) | 8.71 (-8.8%) | 12.51 (-12.5%)
134
+ | MiDaS [30] | MIX 5 | 12.46 | 0.129 | 0.327 | 23.90 | 9.55 | 14.29 |
135
+ | Li [22] | MD [22] | 23.15 | 0.181 | 0.385 | 36.29 | 27.52 | 29.54 |
136
+ | Li [21] | MC [21] | 26.52 | 0.183 | 0.405 | 47.94 | 18.57 | 17.71 |
137
+ | Wang [40] | WS [40] | 19.09 | 0.205 | 0.390 | 31.92 | 29.57 | 20.18 |
138
+ | Xian [45] | RW [45] | 14.59 | 0.186 | 0.422 | 34.08 | 27.00 | 25.02 |
139
+ | Casser [5] | CS [8] | 32.80 | 0.235 | 0.422 | 21.15 | 39.58 | 37.18 |
140
+
141
+ Table 1. Comparison to the state of the art on monocular depth estimation. We evaluate zero-shot cross-dataset transfer according to the
142
+ protocol defined in [30]. Relative performance is computed with respect to the original MiDaS model [30]. Lower is better for all metrics. ([Ranftl et al., 2021](https://arxiv.org/abs/2103.13413))
143
+
144
+
145
+ | Ethical Considerations | Description |
146
+ | ----------- | ----------- |
147
+ | Data | The training data come from multiple image datasets compiled together. |
148
+ | Human life | The model is not intended to inform decisions central to human life or flourishing. It is an aggregated set of monocular depth image datasets. |
149
+ | Mitigations | No additional risk mitigation strategies were considered during model development. |
150
+ | Risks and harms | The extent of the risks involved by using the model remain unknown. |
151
+ | Use cases | - |
152
+
153
+ | Caveats and Recommendations |
154
+ | ----------- |
155
+ | Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. There are no additional caveats or recommendations for this model. |
156
+
157
+
158
+
159
+ ### BibTeX entry and citation info
160
+
161
+ ```bibtex
162
+ @article{DBLP:journals/corr/abs-2103-13413,
163
+ author = {Ren{\'{e}} Ranftl and
164
+ Alexey Bochkovskiy and
165
+ Vladlen Koltun},
166
+ title = {Vision Transformers for Dense Prediction},
167
+ journal = {CoRR},
168
+ volume = {abs/2103.13413},
169
+ year = {2021},
170
+ url = {https://arxiv.org/abs/2103.13413},
171
+ eprinttype = {arXiv},
172
+ eprint = {2103.13413},
173
+ timestamp = {Wed, 07 Apr 2021 15:31:46 +0200},
174
+ biburl = {https://dblp.org/rec/journals/corr/abs-2103-13413.bib},
175
+ bibsource = {dblp computer science bibliography, https://dblp.org}
176
+ }
177
+ ```","{""id"": ""Intel/dpt-large"", ""author"": ""Intel"", ""sha"": ""bc15f29aa3a80d532f2ed650b5e16ac48d8958f9"", ""last_modified"": ""2024-02-24 11:22:17+00:00"", ""created_at"": ""2022-03-02 23:29:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 351072, ""downloads_all_time"": null, ""likes"": 187, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""dpt"", ""depth-estimation"", ""vision"", ""arxiv:2103.13413"", ""license:apache-2.0"", ""model-index"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""depth-estimation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\ntags:\n- vision\n- depth-estimation\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg\n example_title: Tiger\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg\n example_title: Teapot\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg\n example_title: Palace\nmodel-index:\n- name: dpt-large\n results:\n - task:\n type: monocular-depth-estimation\n name: Monocular Depth Estimation\n dataset:\n name: MIX-6\n type: MIX-6\n metrics:\n - type: Zero-shot transfer\n value: 10.82\n name: Zero-shot transfer\n config: Zero-shot transfer\n verified: false"", ""widget_data"": [{""src"": ""https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"", ""example_title"": ""Tiger""}, {""src"": ""https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg"", ""example_title"": ""Teapot""}, {""src"": ""https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg"", ""example_title"": ""Palace""}], ""model_index"": [{""name"": ""dpt-large"", ""results"": [{""task"": {""type"": ""monocular-depth-estimation"", ""name"": ""Monocular Depth Estimation""}, 
""dataset"": {""type"": ""MIX-6"", ""name"": ""MIX-6""}, ""metrics"": [{""type"": ""Zero-shot transfer"", ""value"": 10.82, ""name"": ""Zero-shot transfer"", ""config"": ""Zero-shot transfer"", ""verified"": false}]}]}], ""config"": {""architectures"": [""DPTForDepthEstimation""], ""model_type"": ""dpt""}, ""transformers_info"": {""auto_model"": ""AutoModelForDepthEstimation"", ""custom_class"": null, ""pipeline_tag"": ""depth-estimation"", ""processor"": ""AutoImageProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""microsoft/HuggingGPT"", ""radames/dpt-depth-estimation-3d-obj"", ""nielsr/dpt-depth-estimation"", ""radames/dpt-depth-estimation-3d-voxels"", ""merve/vision_papers"", ""radames/Depth-Image-to-Autostereogram"", ""merve/compare_depth_models"", ""johnrobinsn/MidasDepthEstimation"", ""navervision/Graphit-SD"", ""vivien/depth-aware-caption"", ""verkaDerkaDerk/face-mesh-workflow"", ""jho/MonocularDepth"", ""taesiri/HuggingGPT-Lite"", ""ClaireOzzz/dpt-depth-estimation-3d-obj"", ""xelu3banh/dpt-depth16"", ""adpro/dpt-depth04"", ""adpro/dpt-depth16"", ""bergrozen1213/3d-obj"", ""Surn/HexaGrid"", ""bergrozen1213/3d-obj-v2"", ""hayas-tohoku-workshop-2023/sample-depth-estimation"", ""Surn/DPTDepth3D"", ""adpro/dpt-depth03"", ""adpro/dpt-depth06"", ""Harsimran19/DepthGAN"", ""yuragoithf/mlg_depth_estimation"", ""xelu3banh/dpt-depth01"", ""xelu3banh/dpt-depth02"", ""adpro/dpt-depth07"", ""adpro/dpt-depth10"", ""adpro/dpt-depth13"", ""adpro/dpt-depth15"", ""ccarr0807/HuggingGPT"", 
""theholycityweb/HuggingGPT"", ""kalyas/dpt-depth-estimation"", ""Alfasign/HuggingGPT-Lite"", ""saurshaz/HuggingGPT"", ""Manasee27/safe-steps_gradio"", ""NN-BRD/hackathon_depth_segment_2"", ""ksramalakshmi/depth-estimation-gradio"", ""kedimestan/T3-AI-Hackaton-2024-Abra-Muhara-DPTLarge-Depth-Estimation"", ""jiten6555/Imagemodel"", ""tpatel53/BlurryFace"", ""JarvisOnSolana/Jarvis"", ""Mkg09/MG"", ""xelu3banh/dpt-depth07"", ""xelu3banh/dpt-depth09"", ""xelu3banh/dpt-depth14"", ""xelu3banh/dpt-depth03"", ""xelu3banh/dpt-depth08"", ""xelu3banh/dpt-depth13"", ""xelu3banh/dpt-depth11"", ""xelu3banh/dpt-depth06"", ""xelu3banh/dpt-depth12"", ""xelu3banh/dpt-depth05"", ""xelu3banh/dpt-depth10"", ""xelu3banh/dpt-depth15"", ""xelu3banh/dpt-depth04"", ""adpro/dpt-depth01"", ""adpro/dpt-depth05"", ""chrisW6825/HuggingGPT"", ""adpro/dpt-depth02"", ""Shenziqian/HuggingGPT"", ""adpro/dpt-depth12"", ""lokutus/HuggingGPT"", ""adpro/dpt-depth09"", ""adpro/dpt-depth11"", ""adpro/dpt-depth14"", ""adpro/dpt-depth08"", ""mimiqiao/HuggingGPT"", ""wowochkin/HuggingGPT"", ""tsgbalakarthik/HuggingGPT"", ""Msp/HuggingGPT"", ""Inoob/Intel-dpt-large"", ""apgarmd/jarvis2"", ""apgarmd/jarvis"", ""ryan12439/HuggingGPTpub"", ""CelesteChen/dpt-large-demo"", ""Chokyounghoon/HuggingGPT"", ""turbowed/HuggingGPT"", ""mukulnag/HuggingGPT1"", ""FANCHIYU/HuggingGPT"", ""Betacuckgpt/HuggingGPT"", ""cashqin/HuggingGPT"", ""lollo21/Will-GPT"", ""felixfriday/MICROSOFTT_JARVIS_HuggingGPT"", ""akashraj98283/Intel-dpt-large"", ""Meffordh/HuggingGPT"", ""lugifudun/HuggingGPT"", ""Pfs2021Funny/HuggingGPT"", ""leadmaister/HuggingGPT"", ""irritablebro/HuggingGPT"", ""pors/HuggingGPT"", ""keaneu/HuggingGPT"", ""MagKoz/HuggingGPT"", ""lzqfree/HuggingGPT"", ""zhangdream/HuggingGPT"", ""andomray/Intel-dpt-large"", ""viscosity/HuggingGPT"", ""bountyfuljr/HuggingGPTplaypublic""], ""safetensors"": {""parameters"": {""F32"": 341850305}, ""total"": 341850305}, ""security_repo_status"": null, ""xet_enabled"": null, 
""lastModified"": ""2024-02-24 11:22:17+00:00"", ""cardData"": ""license: apache-2.0\ntags:\n- vision\n- depth-estimation\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg\n example_title: Tiger\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg\n example_title: Teapot\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg\n example_title: Palace\nmodel-index:\n- name: dpt-large\n results:\n - task:\n type: monocular-depth-estimation\n name: Monocular Depth Estimation\n dataset:\n name: MIX-6\n type: MIX-6\n metrics:\n - type: Zero-shot transfer\n value: 10.82\n name: Zero-shot transfer\n config: Zero-shot transfer\n verified: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForDepthEstimation"", ""custom_class"": null, ""pipeline_tag"": ""depth-estimation"", ""processor"": ""AutoImageProcessor""}, ""_id"": ""621ffdc136468d709f17e709"", ""modelId"": ""Intel/dpt-large"", ""usedStorage"": 10948058403}",0,https://huggingface.co/Reverse-project/project,1,,0,https://huggingface.co/Xenova/dpt-large,1,,0,"Surn/DPTDepth3D, Surn/HexaGrid, adpro/dpt-depth04, adpro/dpt-depth16, huggingface/InferenceSupport/discussions/new?title=Intel/dpt-large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIntel%2Fdpt-large%5D(%2FIntel%2Fdpt-large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, johnrobinsn/MidasDepthEstimation, merve/vision_papers, nielsr/dpt-depth-estimation, radames/Depth-Image-to-Autostereogram, radames/dpt-depth-estimation-3d-obj, radames/dpt-depth-estimation-3d-voxels, verkaDerkaDerk/face-mesh-workflow, xelu3banh/dpt-depth16",13
178
+ Reverse-project/project,"---
179
+ license: cc-by-4.0
180
+ datasets:
181
+ - argilla/FinePersonas-v0.1
182
+ - jmcastelo17/FIFA_dataset
183
+ - liangwl19/NCAA2024_train_val
184
+ language:
185
+ - ca
186
+ base_model:
187
+ - upstage/solar-pro-preview-instruct
188
+ - google/gemma-2-9b-it
189
+ - amd/AMD-Llama-135m
190
+ - Intel/dpt-large
191
+ ---","{""id"": ""Reverse-project/project"", ""author"": ""Reverse-project"", ""sha"": ""c0d1a1e52cb232263eba4d34112e25d139816bd9"", ""last_modified"": ""2024-09-20 11:53:51+00:00"", ""created_at"": ""2024-09-20 11:45:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""ca"", ""dataset:argilla/FinePersonas-v0.1"", ""dataset:jmcastelo17/FIFA_dataset"", ""dataset:liangwl19/NCAA2024_train_val"", ""base_model:Intel/dpt-large"", ""base_model:finetune:Intel/dpt-large"", ""license:cc-by-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- upstage/solar-pro-preview-instruct\n- google/gemma-2-9b-it\n- amd/AMD-Llama-135m\n- Intel/dpt-large\ndatasets:\n- argilla/FinePersonas-v0.1\n- jmcastelo17/FIFA_dataset\n- liangwl19/NCAA2024_train_val\nlanguage:\n- ca\nlicense: cc-by-4.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-20 11:53:51+00:00"", ""cardData"": ""base_model:\n- upstage/solar-pro-preview-instruct\n- google/gemma-2-9b-it\n- amd/AMD-Llama-135m\n- Intel/dpt-large\ndatasets:\n- argilla/FinePersonas-v0.1\n- jmcastelo17/FIFA_dataset\n- liangwl19/NCAA2024_train_val\nlanguage:\n- ca\nlicense: cc-by-4.0"", ""transformersInfo"": null, ""_id"": ""66ed6059e55a7b01b3149a2d"", ""modelId"": ""Reverse-project/project"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Reverse-project/project&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BReverse-project%2Fproject%5D(%2FReverse-project%2Fproject)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
finbert-tone_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ yiyanghkust/finbert-tone,"---
3
+ language: ""en""
4
+ tags:
5
+ - financial-sentiment-analysis
6
+ - sentiment-analysis
7
+ widget:
8
+ - text: ""growth is strong and we have plenty of liquidity""
9
+ ---
10
+
11
+ `FinBERT` is a BERT model pre-trained on financial communication text. The purpose is to enhance financial NLP research and practice. It is trained on the following three financial communication corpus. The total corpora size is 4.9B tokens.
12
+ - Corporate Reports 10-K & 10-Q: 2.5B tokens
13
+ - Earnings Call Transcripts: 1.3B tokens
14
+ - Analyst Reports: 1.1B tokens
15
+
16
+ More technical details on `FinBERT`: [Click Link](https://github.com/yya518/FinBERT)
17
+
18
+ This released `finbert-tone` model is the `FinBERT` model fine-tuned on 10,000 manually annotated (positive, negative, neutral) sentences from analyst reports. This model achieves superior performance on financial tone analysis task. If you are simply interested in using `FinBERT` for financial tone analysis, give it a try.
19
+
20
+ If you use the model in your academic work, please cite the following paper:
21
+
22
+ Huang, Allen H., Hui Wang, and Yi Yang. ""FinBERT: A Large Language Model for Extracting Information from Financial Text."" *Contemporary Accounting Research* (2022).
23
+
24
+
25
+ # How to use
26
+ You can use this model with Transformers pipeline for sentiment analysis.
27
+ ```python
28
+ from transformers import BertTokenizer, BertForSequenceClassification
29
+ from transformers import pipeline
30
+
31
+ finbert = BertForSequenceClassification.from_pretrained('yiyanghkust/finbert-tone',num_labels=3)
32
+ tokenizer = BertTokenizer.from_pretrained('yiyanghkust/finbert-tone')
33
+
34
+ nlp = pipeline(""sentiment-analysis"", model=finbert, tokenizer=tokenizer)
35
+
36
+ sentences = [""there is a shortage of capital, and we need extra financing"",
37
+ ""growth is strong and we have plenty of liquidity"",
38
+ ""there are doubts about our finances"",
39
+ ""profits are flat""]
40
+ results = nlp(sentences)
41
+ print(results) #LABEL_0: neutral; LABEL_1: positive; LABEL_2: negative
42
+
43
+ ```","{""id"": ""yiyanghkust/finbert-tone"", ""author"": ""yiyanghkust"", ""sha"": ""4921590d3c0c3832c0efea24c8381ce0bda7844b"", ""last_modified"": ""2022-10-17 00:35:39+00:00"", ""created_at"": ""2022-03-02 23:29:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 993373, ""downloads_all_time"": null, ""likes"": 178, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tf"", ""text-classification"", ""financial-sentiment-analysis"", ""sentiment-analysis"", ""en"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language: en\ntags:\n- financial-sentiment-analysis\n- sentiment-analysis\nwidget:\n- text: growth is strong and we have plenty of liquidity"", ""widget_data"": [{""text"": ""growth is strong and we have plenty of liquidity""}], ""model_index"": null, ""config"": {""architectures"": [""BertForSequenceClassification""]}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""rajistics/Financial_Analyst_AI"", ""shekolla/finbert-financial-sentiment"", ""sushobhan55/finbert_finviz"", ""sohomghosh/FLUEnT"", ""chainyo/optimum-text-classification"", ""seki/sk"", ""ShreyashNadage/InvestmentCopilot"", 
""awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary"", ""parkerjj/BuckLakeAI"", ""feliponi/stock"", ""akhaliq/finbert-tone"", ""abdulmatinomotoso/stock_news_sentiment_and_NER"", ""joulammous/Patent-Project"", ""JKJanosko/Toxicity-Analysis"", ""dackdel/yiyanghkust-finbert-tone-test"", ""MikeJeong/apppp"", ""MikeJeong/CSProject"", ""MikeJeong/CS482-FineTune"", ""Pushpa/APP"", ""Pranav-K/FinBert"", ""Pushpa/bert"", ""JulianHame/sentiment-analysis-app"", ""Chriiz/yiyanghkust-finbert-tone"", ""adityasrathore/Financial_Analyst_AI"", ""subhannadeem1/torch-stock-predictor"", ""sarat2hf/stock_information_app"", ""Aakarsh14n/finance-sentimental"", ""ansgarykrk93/yiyanghkust-finbert-tone"", ""jclhkma/yiyanghkust-finbert-tone"", ""inclinedadarsh/test-fine-tuning"", ""kaljuvee/Financial_Analyst_AI"", ""Hudda/financial_sentiment_signalling"", ""rahul09092607/TEST"", ""Abkhan07/yiyanghkust-finbert-tone"", ""Cachoups/FinanceReport"", ""pentarosarium/processor"", ""Akshayram1/Stock3"", ""Danielhome/cubeiro"", ""kstang88/Stock_Trading_LLM"", ""akazmi/Investment-Advisory-App"", ""akazmi/Investment-Advisor-2"", ""akazmi/Investement-Advisor-3"", ""pentarosarium/gprocess"", ""adsnufkin/hw-modeling-03"", ""Anupam251272/Financial_Analyst_AI918847374914"", ""Vickiiiyippp/financial_text_summarization"", ""RAHULJUNEJA33/Financial_Report_Sentiment_Analyzer"", ""shouryam1/yiyanghkust-finbert-tone"", ""tan2360/news_sentiment_tts"", ""ChienChung/FinBERT"", ""proKBD/news-summarization"", ""hy982/ssss"", ""Darmin3000/RAG-Bluum"", ""riccione55/MultiAgent-Council"", ""abhay250/finebert-backend"", ""kakaprince46/wealth-management-bot""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-10-17 00:35:39+00:00"", ""cardData"": ""language: en\ntags:\n- financial-sentiment-analysis\n- sentiment-analysis\nwidget:\n- text: growth is strong and we have plenty of liquidity"", ""transformersInfo"": {""auto_model"": 
""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": null}, ""_id"": ""621ffdc136468d709f181abd"", ""modelId"": ""yiyanghkust/finbert-tone"", ""usedStorage"": 2195617187}",0,"https://huggingface.co/jjonas313/autotrain-ecb-uncertainty-finbert-tone, https://huggingface.co/MaRyAm1295/finBERT-KAM, https://huggingface.co/gtfintechlab/SubjECTiveQA-SPECIFIC, https://huggingface.co/hzduuuu/finbert-base-sentiment, https://huggingface.co/Sugutt/finmap-expense-cat-model",5,,0,,0,,0,"ShreyashNadage/InvestmentCopilot, akhaliq/finbert-tone, awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary, chainyo/optimum-text-classification, feliponi/stock, huggingface/InferenceSupport/discussions/1012, parkerjj/BuckLakeAI, pentarosarium/processor, rajistics/Financial_Analyst_AI, seki/sk, shekolla/finbert-financial-sentiment, sohomghosh/FLUEnT, sushobhan55/finbert_finviz",13
44
+ jjonas313/autotrain-ecb-uncertainty-finbert-tone,"
45
+ ---
46
+ tags:
47
+ - autotrain
48
+ - text-classification
49
+ base_model: yiyanghkust/finbert-tone
50
+ widget:
51
+ - text: ""I love AutoTrain""
52
+ ---
53
+
54
+ # Model Trained Using AutoTrain
55
+
56
+ - Problem type: Text Classification
57
+
58
+ ## Validation Metrics
59
+ loss: 0.5263932943344116
60
+
61
+ f1: 0.6754385964912281
62
+
63
+ precision: 0.7403846153846154
64
+
65
+ recall: 0.6209677419354839
66
+
67
+ auc: 0.8266129032258064
68
+
69
+ accuracy: 0.7533333333333333
70
+ ","{""id"": ""jjonas313/autotrain-ecb-uncertainty-finbert-tone"", ""author"": ""jjonas313"", ""sha"": ""77741eaef460c442765eba6f1edfcf52c1048cd0"", ""last_modified"": ""2024-09-05 14:30:42+00:00"", ""created_at"": ""2024-09-05 14:29:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""tensorboard"", ""safetensors"", ""bert"", ""autotrain"", ""text-classification"", ""base_model:yiyanghkust/finbert-tone"", ""base_model:finetune:yiyanghkust/finbert-tone"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: yiyanghkust/finbert-tone\ntags:\n- autotrain\n- text-classification\nwidget:\n- text: I love AutoTrain"", ""widget_data"": [{""text"": ""I love AutoTrain""}], ""model_index"": null, ""config"": {""architectures"": [""BertForSequenceClassification""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/rng_state.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-240/trainer_state.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='checkpoint-240/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Sep05_14-29-50_r-jjonas313-autotrain-ecb-uncertainy-7lc13857-3e966-8536m/events.out.tfevents.1725546591.r-jjonas313-autotrain-ecb-uncertainy-7lc13857-3e966-8536m.166.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Sep05_14-29-50_r-jjonas313-autotrain-ecb-uncertainy-7lc13857-3e966-8536m/events.out.tfevents.1725546628.r-jjonas313-autotrain-ecb-uncertainy-7lc13857-3e966-8536m.166.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_params.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 109753346}, ""total"": 109753346}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-05 14:30:42+00:00"", ""cardData"": ""base_model: yiyanghkust/finbert-tone\ntags:\n- autotrain\n- text-classification\nwidget:\n- text: I love AutoTrain"", ""transformersInfo"": null, ""_id"": ""66d9c05d8a22180768bc2c52"", ""modelId"": ""jjonas313/autotrain-ecb-uncertainty-finbert-tone"", ""usedStorage"": 
1317277810}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=jjonas313/autotrain-ecb-uncertainty-finbert-tone&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjjonas313%2Fautotrain-ecb-uncertainty-finbert-tone%5D(%2Fjjonas313%2Fautotrain-ecb-uncertainty-finbert-tone)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
71
+ MaRyAm1295/finBERT-KAM,"---
72
+ library_name: transformers
73
+ base_model: yiyanghkust/finbert-tone
74
+ tags:
75
+ - generated_from_trainer
76
+ model-index:
77
+ - name: finBERT-KAM
78
+ results: []
79
+ ---
80
+
81
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
82
+ should probably proofread and complete it, then remove this comment. -->
83
+
84
+ # finBERT-KAM
85
+
86
+ This model is a fine-tuned version of [yiyanghkust/finbert-tone](https://huggingface.co/yiyanghkust/finbert-tone) on the None dataset.
87
+ It achieves the following results on the evaluation set:
88
+ - Loss: 0.4697
89
+
90
+ ## Model description
91
+
92
+ More information needed
93
+
94
+ ## Intended uses & limitations
95
+
96
+ More information needed
97
+
98
+ ## Training and evaluation data
99
+
100
+ More information needed
101
+
102
+ ## Training procedure
103
+
104
+ ### Training hyperparameters
105
+
106
+ The following hyperparameters were used during training:
107
+ - learning_rate: 5e-05
108
+ - train_batch_size: 8
109
+ - eval_batch_size: 8
110
+ - seed: 42
111
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
112
+ - lr_scheduler_type: linear
113
+ - lr_scheduler_warmup_steps: 500
114
+ - num_epochs: 2
115
+
116
+ ### Training results
117
+
118
+ | Training Loss | Epoch | Step | Validation Loss |
119
+ |:-------------:|:-----:|:-----:|:---------------:|
120
+ | 0.4813 | 1.0 | 6520 | 0.5137 |
121
+ | 0.2616 | 2.0 | 13040 | 0.4697 |
122
+
123
+
124
+ ### Framework versions
125
+
126
+ - Transformers 4.45.1
127
+ - Pytorch 2.4.0
128
+ - Datasets 3.0.1
129
+ - Tokenizers 0.20.0
130
+ ","{""id"": ""MaRyAm1295/finBERT-KAM"", ""author"": ""MaRyAm1295"", ""sha"": ""0ddcca7265f961c964fde51d32450e1f771f6a3e"", ""last_modified"": ""2024-10-29 17:31:47+00:00"", ""created_at"": ""2024-09-14 09:11:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""bert"", ""text-classification"", ""generated_from_trainer"", ""base_model:yiyanghkust/finbert-tone"", ""base_model:finetune:yiyanghkust/finbert-tone"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: yiyanghkust/finbert-tone\nlibrary_name: transformers\ntags:\n- generated_from_trainer\nmodel-index:\n- name: finBERT-KAM\n results: []"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": [{""name"": ""finBERT-KAM"", ""results"": []}], ""config"": {""architectures"": [""BertForSequenceClassification""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 109789489}, ""total"": 109789489}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-29 17:31:47+00:00"", ""cardData"": ""base_model: yiyanghkust/finbert-tone\nlibrary_name: transformers\ntags:\n- generated_from_trainer\nmodel-index:\n- name: finBERT-KAM\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e5535a78f2c37966632259"", ""modelId"": ""MaRyAm1295/finBERT-KAM"", ""usedStorage"": 
6148881900}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MaRyAm1295/finBERT-KAM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMaRyAm1295%2FfinBERT-KAM%5D(%2FMaRyAm1295%2FfinBERT-KAM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
131
+ gtfintechlab/SubjECTiveQA-SPECIFIC,"---
132
+ license: cc-by-4.0
133
+ datasets:
134
+ - gtfintechlab/subjectiveqa
135
+ language:
136
+ - en
137
+ metrics:
138
+ - accuracy
139
+ - f1
140
+ - precision
141
+ - recall
142
+ base_model:
143
+ - yiyanghkust/finbert-tone
144
+ pipeline_tag: text-classification
145
+ library_name: transformers
146
+ ---
147
+
148
+ # SubjECTiveQA-SPECIFIC Model
149
+
150
+ **Model Name:** SubjECTiveQA-SPECIFIC
151
+
152
+ **Model Type:** Text Classification
153
+
154
+ **Language:** English
155
+
156
+ **License:** [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/)
157
+
158
+ **Base Model:** [yiyanghkust/finbert-tone](https://huggingface.co/yiyanghkust/finbert-tone)
159
+
160
+ **Dataset Used for Training:** [gtfintechlab/SubjECTive-QA](https://huggingface.co/datasets/gtfintechlab/SubjECTive-QA)
161
+
162
+ ## Model Overview
163
+
164
+ SubjECTiveQA-SPECIFIC is a fine-tuned FinBERT-based model designed to classify text data according to the 'SPECIFIC' attribute. The 'SPECIFIC' attribute is one of several subjective attributes annotated in the SubjECTive-QA dataset, which focuses on subjective question-answer pairs in financial contexts.
165
+
166
+ ## Intended Use
167
+
168
+ This model is intended for researchers and practitioners working on subjective text classification, particularly within financial domains. It is specifically designed to assess the 'SPECIFIC' attribute in question-answer pairs, aiding in the analysis of subjective content in financial communications.
169
+
170
+ ## How to Use
171
+
172
+ To utilize this model, you can load it using the Hugging Face `transformers` library:
173
+
174
+ ```python
175
+ from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoConfig
176
+
177
+ # Load the tokenizer, model, and configuration
178
+ tokenizer = AutoTokenizer.from_pretrained(""gtfintechlab/SubjECTiveQA-SPECIFIC"", do_lower_case=True, do_basic_tokenize=True)
179
+ model = AutoModelForSequenceClassification.from_pretrained(""gtfintechlab/SubjECTiveQA-SPECIFIC"", num_labels=3)
180
+ config = AutoConfig.from_pretrained(""gtfintechlab/SubjECTiveQA-SPECIFIC"")
181
+
182
+ # Initialize the text classification pipeline
183
+ classifier = pipeline('text-classification', model=model, tokenizer=tokenizer, config=config, framework=""pt"")
184
+
185
+ # Classify the 'SPECIFIC' attribute in your question-answer pairs
186
+ qa_pairs = [
187
+ ""Question: What are your company's projections for the next quarter? Answer: We anticipate a 10% increase in revenue due to the launch of our new product line."",
188
+ ""Question: Can you explain the recent decline in stock prices? Answer: Market fluctuations are normal, and we are confident in our long-term strategy.""
189
+ ]
190
+ results = classifier(qa_pairs, batch_size=128, truncation=""only_first"")
191
+
192
+ print(results)
193
+ ```
194
+
195
+ In this script:
196
+
197
+ - **Tokenizer and Model Loading:** The `AutoTokenizer` and `AutoModelForSequenceClassification` classes load the pre-trained tokenizer and model, respectively, from the `gtfintechlab/SubjECTiveQA-SPECIFIC` repository.
198
+
199
+ - **Configuration:** The `AutoConfig` class loads the model configuration, which includes parameters such as the number of labels.
200
+
201
+ - **Pipeline Initialization:** The `pipeline` function initializes a text classification pipeline with the loaded model, tokenizer, and configuration.
202
+
203
+ - **Classification:** The `classifier` processes a list of question-answer pairs to assess the 'SPECIFIC' attribute. The `batch_size` parameter controls the number of samples processed simultaneously, and `truncation=""only_first""` ensures that only the first sequence in each pair is truncated if it exceeds the model's maximum input length.
204
+
205
+ Ensure that your environment has the necessary dependencies installed.
206
+
207
+ ## Label Interpretation
208
+
209
+ - **LABEL_0:** Negatively Demonstrative of 'SPECIFIC' (0)
210
+ Indicates that the response lacks specificity.
211
+
212
+ - **LABEL_1:** Neutral Demonstration of 'SPECIFIC' (1)
213
+ Indicates that the response has an average level of specificity.
214
+
215
+ - **LABEL_2:** Positively Demonstrative of 'SPECIFIC' (2)
216
+ Indicates that the response is specific and detailed.
217
+
218
+ ## Training Data
219
+
220
+ The model was trained on the SubjECTive-QA dataset, which comprises question-answer pairs from financial contexts, annotated with various subjective attributes, including 'SPECIFIC'. The dataset is divided into training, validation, and test sets, facilitating robust model training and evaluation.
221
+
222
+ ## Citation
223
+
224
+ If you use this model in your research, please cite the SubjECTive-QA dataset:
225
+
226
+ ```
227
+ @article{SubjECTiveQA,
228
+ title={SubjECTive-QA: Measuring Subjectivity in Earnings Call Transcripts’ QA Through Six-Dimensional Feature Analysis},
229
+ author={Huzaifa Pardawala, Siddhant Sukhani, Agam Shah, Veer Kejriwal, Abhishek Pillai, Rohan Bhasin, Andrew DiBiasio, Tarun Mandapati, Dhruv Adha, Sudheer Chava},
230
+ journal={arXiv preprint arXiv:2410.20651},
231
+ year={2024}
232
+ }
233
+ ```
234
+
235
+ For more details, refer to the [SubjECTive-QA dataset documentation](https://huggingface.co/datasets/gtfintechlab/SubjECTive-QA).
236
+
237
+ ## Contact
238
+
239
+ For any SubjECTive-QA related issues and questions, please contact:
240
+
241
+ - Huzaifa Pardawala: huzaifahp7[at]gatech[dot]edu
242
+
243
+ - Siddhant Sukhani: ssukhani3[at]gatech[dot]edu
244
+
245
+ - Agam Shah: ashah482[at]gatech[dot]edu ","{""id"": ""gtfintechlab/SubjECTiveQA-SPECIFIC"", ""author"": ""gtfintechlab"", ""sha"": ""86c939d68feb8d2dd23fe583b2144903d007adac"", ""last_modified"": ""2024-12-17 00:39:24+00:00"", ""created_at"": ""2024-10-08 19:06:16+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 25, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""bert"", ""text-classification"", ""en"", ""dataset:gtfintechlab/subjectiveqa"", ""arxiv:2410.20651"", ""base_model:yiyanghkust/finbert-tone"", ""base_model:finetune:yiyanghkust/finbert-tone"", ""license:cc-by-4.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model:\n- yiyanghkust/finbert-tone\ndatasets:\n- gtfintechlab/subjectiveqa\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-4.0\nmetrics:\n- accuracy\n- f1\n- precision\n- recall\npipeline_tag: text-classification"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": null, ""config"": {""architectures"": [""BertForSequenceClassification""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 109754115}, ""total"": 109754115}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-17 00:39:24+00:00"", ""cardData"": ""base_model:\n- yiyanghkust/finbert-tone\ndatasets:\n- gtfintechlab/subjectiveqa\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-4.0\nmetrics:\n- accuracy\n- f1\n- precision\n- recall\npipeline_tag: text-classification"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""670582a8c93561021c53dfb6"", ""modelId"": ""gtfintechlab/SubjECTiveQA-SPECIFIC"", ""usedStorage"": 
439039996}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=gtfintechlab/SubjECTiveQA-SPECIFIC&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgtfintechlab%2FSubjECTiveQA-SPECIFIC%5D(%2Fgtfintechlab%2FSubjECTiveQA-SPECIFIC)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
246
+ hzduuuu/finbert-base-sentiment,"---
247
+ license: mit
248
+ base_model:
249
+ - yiyanghkust/finbert-tone
250
+ library_name: transformers
251
+ ---","{""id"": ""hzduuuu/finbert-base-sentiment"", ""author"": ""hzduuuu"", ""sha"": ""26ae85fce1e0134dbd872e63044dd9e45b6fe218"", ""last_modified"": ""2024-11-06 02:54:17+00:00"", ""created_at"": ""2024-11-06 01:23:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""bert"", ""text-classification"", ""base_model:yiyanghkust/finbert-tone"", ""base_model:finetune:yiyanghkust/finbert-tone"", ""license:mit"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model:\n- yiyanghkust/finbert-tone\nlibrary_name: transformers\nlicense: mit"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": {""architectures"": [""BertForSequenceClassification""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 109754115}, ""total"": 109754115}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-06 02:54:17+00:00"", ""cardData"": ""base_model:\n- yiyanghkust/finbert-tone\nlibrary_name: transformers\nlicense: mit"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""672ac50cbde669ec7e7864bb"", ""modelId"": ""hzduuuu/finbert-base-sentiment"", ""usedStorage"": 439039996}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=hzduuuu/finbert-base-sentiment&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhzduuuu%2Ffinbert-base-sentiment%5D(%2Fhzduuuu%2Ffinbert-base-sentiment)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
252
+ Sugutt/finmap-expense-cat-model,"---
253
+ license: mit
254
+ metrics:
255
+ - accuracy
256
+ base_model:
257
+ - yiyanghkust/finbert-tone
258
+ tags:
259
+ - finance
260
+ ---","{""id"": ""Sugutt/finmap-expense-cat-model"", ""author"": ""Sugutt"", ""sha"": ""838a8c116811910194f0d413e8c4ea8c782f3f7f"", ""last_modified"": ""2025-04-23 14:29:57+00:00"", ""created_at"": ""2025-04-15 07:13:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 102, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""bert"", ""finance"", ""base_model:yiyanghkust/finbert-tone"", ""base_model:finetune:yiyanghkust/finbert-tone"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model:\n- yiyanghkust/finbert-tone\nlicense: mit\nmetrics:\n- accuracy\ntags:\n- finance"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BertForSequenceClassification""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 109759498}, ""total"": 109759498}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-23 
14:29:57+00:00"", ""cardData"": ""base_model:\n- yiyanghkust/finbert-tone\nlicense: mit\nmetrics:\n- accuracy\ntags:\n- finance"", ""transformersInfo"": null, ""_id"": ""67fe070f10a8b499849baf09"", ""modelId"": ""Sugutt/finmap-expense-cat-model"", ""usedStorage"": 439061528}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Sugutt/finmap-expense-cat-model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSugutt%2Ffinmap-expense-cat-model%5D(%2FSugutt%2Ffinmap-expense-cat-model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
flan-t5-small_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
The diff for this file is too large to render. See raw diff
 
flan-t5-xl_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
The diff for this file is too large to render. See raw diff
 
flux-dev-de-distill_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ nyanko7/flux-dev-de-distill,"---
3
+ license: mit
4
+ language:
5
+ - en
6
+ base_model:
7
+ - black-forest-labs/FLUX.1-dev
8
+ ---
9
+
10
+ # Flux-dev-de-distill
11
+
12
+ This is an experiment to de-distill guidance from flux.1-dev. We removed the original distilled guidance and make true classifier-free guidance reworks.
13
+
14
+ ## Model Details
15
+
16
+ Following Algorithm 1 in [On Distillation of Guided Diffusion Models](https://arxiv.org/abs/2210.03142), we attempted to reverse the distillation process by re-matching guidance scale w. we introduce a student model
17
+ x(zt) to match the output of the teacher at any time-step t ∈ [0, 1] and any guidance scale w ∈ [1, 4]. We initialize the student model with parameters from the teacher model except for the parameters related to w-embedding.
18
+
19
+ Since this model uses true CFG instead of distilled CFG, it is not compatible with diffusers pipeline. Please use [inference script](./inference.py) or manually add guidance in the iteration loop.
20
+
21
+ Train: 150K Unsplash images, 1024px square, 6k steps with global batch size 32, frozen teacher model, approx 12 hours due to limited compute.
22
+
23
+ Examples: Distilled CFG / True CFG
24
+
25
+ ![](./example2.webp)
26
+ ![](./example1.webp)","{""id"": ""nyanko7/flux-dev-de-distill"", ""author"": ""nyanko7"", ""sha"": ""84a0d70dee11829af030ef968d957a174ca1aa1d"", ""last_modified"": ""2024-09-12 13:43:05+00:00"", ""created_at"": ""2024-09-10 13:40:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 209, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""arxiv:2210.03142"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:finetune:black-forest-labs/FLUX.1-dev"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='consolidated_s6700.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example1.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example2.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example3.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inference.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ameerazam08/FLUX.1-dev-De-Distill"", ""John6666/FLUX.1-dev-De-Distill""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-12 13:43:05+00:00"", ""cardData"": ""base_model:\n- black-forest-labs/FLUX.1-dev\nlanguage:\n- en\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""66e04c3f4833a11779daddb1"", ""modelId"": ""nyanko7/flux-dev-de-distill"", ""usedStorage"": 23782443544}",0,,0,,0,,0,,0,"John6666/FLUX.1-dev-De-Distill, 
ameerazam08/FLUX.1-dev-De-Distill, huggingface/InferenceSupport/discussions/new?title=nyanko7/flux-dev-de-distill&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnyanko7%2Fflux-dev-de-distill%5D(%2Fnyanko7%2Fflux-dev-de-distill)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",3
flux-ip-adapter-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ XLabs-AI/flux-ip-adapter-v2,"---
3
+ license: apache-2.0
4
+ datasets:
5
+ - CaptionEmporium/coyo-hd-11m-llavanext
6
+ - CortexLM/midjourney-v6
7
+ language:
8
+ - en
9
+ base_model:
10
+ - black-forest-labs/FLUX.1-dev
11
+ pipeline_tag: image-to-image
12
+ library_name: diffusers
13
+ ---
14
+
15
+ <img src=""assets/banner-dark.png?raw=true"" alt=""Banner Picture 1"" style=""width:1024px;""/>
16
+ <a href=""https://discord.gg/FHY2guThfy"">
17
+ <img src=""https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/light/join-our-discord-rev1.png?raw=true"" style=""width:1024px;""/>
18
+ </a>
19
+ <img src=""assets/ip_adapter_0.jpg?raw=true"" alt=""example_0"" style=""width:1024px;""/>
20
+ <img src=""assets/mona_workflow.jpg?raw=true"" alt=""Mona Anime Workflow 1"" style=""width:1024px;""/>
21
+
22
+ This repository provides a IP-Adapter checkpoint for
23
+ [FLUX.1-dev model](https://huggingface.co/black-forest-labs/FLUX.1-dev) by Black Forest Labs
24
+
25
+ [See our github](https://github.com/XLabs-AI/x-flux-comfyui) for comfy ui workflows.
26
+
27
+ # Models
28
+ The IP adapter is trained on a resolution of 512x512 for 150k steps and 1024x1024 for 350k steps while maintaining the aspect ratio.
29
+ We release **v2 version** - which can be used directly in ComfyUI!
30
+
31
+ Please, see our [ComfyUI custom nodes installation guide](https://github.com/XLabs-AI/x-flux-comfyui)
32
+
33
+ # Examples
34
+
35
+ See examples of our models results below.
36
+ Also, some generation results with input images are provided in ""Files and versions""
37
+
38
+ # Inference
39
+
40
+ To try our models, you have 2 options:
41
+ 1. Use main.py from our [official repo](https://github.com/XLabs-AI/x-flux)
42
+ 2. Use our custom nodes for ComfyUI and test it with provided workflows (check out folder /workflows)
43
+
44
+ ## Instruction for ComfyUI
45
+ 1. Go to ComfyUI/custom_nodes
46
+ 2. Clone [x-flux-comfyui](https://github.com/XLabs-AI/x-flux-comfyui.git), path should be ComfyUI/custom_nodes/x-flux-comfyui/*, where * is all the files in this repo
47
+ 3. Go to ComfyUI/custom_nodes/x-flux-comfyui/ and run python setup.py
48
+ 4. Update x-flux-comfy with `git pull` or reinstall it.
49
+ 5. Download Clip-L `model.safetensors` from [OpenAI VIT CLIP large](https://huggingface.co/openai/clip-vit-large-patch14), and put it to `ComfyUI/models/clip_vision/*`.
50
+ 6. Download our IPAdapter from [huggingface](https://huggingface.co/XLabs-AI/flux-ip-adapter/tree/main), and put it to `ComfyUI/models/xlabs/ipadapters/*`.
51
+ 7. Use `Flux Load IPAdapter` and `Apply Flux IPAdapter` nodes, choose right CLIP model and enjoy your genereations.
52
+ 8. You can find example workflow in folder workflows in this repo.
53
+
54
+ If you get bad results, try to set to play with ip strength
55
+ ### Limitations
56
+ The IP Adapter is currently in beta.
57
+ We do not guarantee that you will get a good result right away, it may take more attempts to get a result.
58
+ <img src=""assets/ip_adapter_2.jpg?raw=true"" alt=""example_2"" style=""width:1024px;""/>
59
+ <img src=""assets/ip_adapter_3.jpg?raw=true"" alt=""example_3"" style=""width:1024px;""/>
60
+ <img src=""assets/ip_adapter_1.jpg?raw=true"" alt=""example_1"" style=""width:1024px;""/>
61
+ <img src=""assets/ip_adapter_4.jpg?raw=true"" alt=""example_4"" style=""width:1024px;""/>
62
+ <img src=""assets/ip_adapter_5.jpg?raw=true"" alt=""example_5"" style=""width:1024px;""/>
63
+ <img src=""assets/ip_adapter_6.jpg?raw=true"" alt=""example_6"" style=""width:1024px;""/>
64
+
65
+
66
+ ## License
67
+ Our weights fall under the [FLUX.1 [dev]](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) Non-Commercial License<br/>","{""id"": ""XLabs-AI/flux-ip-adapter-v2"", ""author"": ""XLabs-AI"", ""sha"": ""d1507fa7314df9f7dbe30a1e9659e5e634a58464"", ""last_modified"": ""2024-10-24 10:44:47+00:00"", ""created_at"": ""2024-10-23 10:43:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 41855, ""downloads_all_time"": null, ""likes"": 256, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""image-to-image"", ""en"", ""dataset:CaptionEmporium/coyo-hd-11m-llavanext"", ""dataset:CortexLM/midjourney-v6"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:finetune:black-forest-labs/FLUX.1-dev"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- black-forest-labs/FLUX.1-dev\ndatasets:\n- CaptionEmporium/coyo-hd-11m-llavanext\n- CortexLM/midjourney-v6\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: image-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/banner-dark.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_0.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_3.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_4.jpg', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='assets/ip_adapter_5.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/ip_adapter_6.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/mona_workflow.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ip_adapter.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ip_adapter_workflow.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Boris-Britva/XLabs-AI-flux-ip-adapter-v2"", ""BossriZytn/XLabs-AI-flux-ip-adapter-v2"", ""ashishnayak7/XLabs-AI-flux-ip-adapter-v2"", ""kmillan/XLabs-AI-flux-ip-adapter-v2"", ""elis159/XLabs-AI-flux-ip-adapter-v2"", ""fakenews222/XLabs-AI-flux-ip-adapter-v2"", ""vt8032/XLabs-AI-flux-ip-adapter-v2"", ""mosca312/XLabs-AI-flux-ip-adapter-v2"", ""Vaunorage/XLabs-AI-flux-ip-adapter-v2"", ""anisirji/img-to-img-meme-ai"", ""Ricofishing/XLabs-AI-flux-ip-adapter-v2"", ""niksahu/FLUX-IP-ADAPTER-2"", ""Meistro77/XLabs-AI-flux-ip-adapter-v2"", ""Pacifica/XLabs-AI-flux-ip-adapter-v2"", ""bryaanabraham/img2img_XLabs""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-24 10:44:47+00:00"", ""cardData"": ""base_model:\n- black-forest-labs/FLUX.1-dev\ndatasets:\n- CaptionEmporium/coyo-hd-11m-llavanext\n- CortexLM/midjourney-v6\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: image-to-image"", ""transformersInfo"": null, ""_id"": ""6718d34ad66e1bfe9ef5abd9"", ""modelId"": ""XLabs-AI/flux-ip-adapter-v2"", ""usedStorage"": 1087102998}",0,,0,,0,,0,,0,"Boris-Britva/XLabs-AI-flux-ip-adapter-v2, BossriZytn/XLabs-AI-flux-ip-adapter-v2, Ricofishing/XLabs-AI-flux-ip-adapter-v2, Vaunorage/XLabs-AI-flux-ip-adapter-v2, anisirji/img-to-img-meme-ai, ashishnayak7/XLabs-AI-flux-ip-adapter-v2, elis159/XLabs-AI-flux-ip-adapter-v2, fakenews222/XLabs-AI-flux-ip-adapter-v2, 
huggingface/InferenceSupport/discussions/new?title=XLabs-AI/flux-ip-adapter-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BXLabs-AI%2Fflux-ip-adapter-v2%5D(%2FXLabs-AI%2Fflux-ip-adapter-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kmillan/XLabs-AI-flux-ip-adapter-v2, mosca312/XLabs-AI-flux-ip-adapter-v2, niksahu/FLUX-IP-ADAPTER-2, vt8032/XLabs-AI-flux-ip-adapter-v2",13
flux1-dev-bnb-nf4_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ lllyasviel/flux1-dev-bnb-nf4,"---
3
+ license: other
4
+ license_name: flux-1-dev-non-commercial-license
5
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
6
+ ---
7
+
8
+ Main page: https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/981
9
+
10
+ ---
11
+
12
+ Update:
13
+
14
+ Always use V2 by default.
15
+
16
+ V2 is quantized in a better way to turn off the second stage of double quant.
17
+
18
+ V2 is 0.5 GB larger than the previous version, since the chunk 64 norm is now stored in full precision float32, making it much more precise than the previous version. Also, since V2 does not have second compression stage, it now has less computation overhead for on-the-fly decompression, making the inference a bit faster.
19
+
20
+ The only drawback of V2 is being 0.5 GB larger.
21
+
22
+ ---
23
+
24
+ Main model in bnb-nf4 (v1 with chunk 64 norm in nf4, v2 with chunk 64 norm in float32)
25
+
26
+ T5xxl in fp8e4m3fn
27
+
28
+ CLIP-L in fp16
29
+
30
+ VAE in bf16
31
+
32
+
33
+ ","{""id"": ""lllyasviel/flux1-dev-bnb-nf4"", ""author"": ""lllyasviel"", ""sha"": ""32d13814357ca76c52638d4644d0f7384208dc96"", ""last_modified"": ""2024-08-14 04:44:05+00:00"", ""created_at"": ""2024-08-10 08:12:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 679, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux1-dev-bnb-nf4-v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux1-dev-bnb-nf4.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ginipick/FLUXllama"", ""nyanko7/flux1-dev-nf4"", ""John6666/joy-caption-pre-alpha-mod"", ""ginigen/FLUXllama-Multilingual"", ""Sebastiankay/FLUX.1-DEV-NF4"", ""MegaTronX/joy-caption-pre-alpha-mod"", ""John6666/joy-caption-pre-alpha-mod-error"", ""tahu321/saepul-ir"", ""ParimalX/InfinixA"", ""Toniska/FLUXllama2"", ""silveroxides/FLUXllama"", ""hasankara14/FLUXllama"", ""officialkep1er/FLUXllama"", ""habibio/Flux-new"", ""wambugu71/FLUXllama"", ""mstraughan/FLUXllama-Multilingual""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-14 04:44:05+00:00"", ""cardData"": ""license: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md"", 
""transformersInfo"": null, ""_id"": ""66b720f8503e23d3bd4d12f6"", ""modelId"": ""lllyasviel/flux1-dev-bnb-nf4"", ""usedStorage"": 23534164320}",0,https://huggingface.co/aashush/quantized-local-llm-int8,1,,0,,0,,0,"John6666/joy-caption-pre-alpha-mod, John6666/joy-caption-pre-alpha-mod-error, MegaTronX/joy-caption-pre-alpha-mod, ParimalX/InfinixA, Sebastiankay/FLUX.1-DEV-NF4, Toniska/FLUXllama2, ginigen/FLUXllama-Multilingual, ginipick/FLUXllama, huggingface/InferenceSupport/discussions/new?title=lllyasviel/flux1-dev-bnb-nf4&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blllyasviel%2Fflux1-dev-bnb-nf4%5D(%2Flllyasviel%2Fflux1-dev-bnb-nf4)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, nyanko7/flux1-dev-nf4, silveroxides/FLUXllama, tahu321/saepul-ir, wambugu71/FLUXllama",13
34
+ aashush/quantized-local-llm-int8,"---
35
+ license: mit
36
+ datasets:
37
+ - NousResearch/hermes-function-calling-v1
38
+ language:
39
+ - en
40
+ metrics:
41
+ - charcut_mt
42
+ base_model: lllyasviel/flux1-dev-bnb-nf4
43
+ library_name: fasttext
44
+ ---","{""id"": ""aashush/quantized-local-llm-int8"", ""author"": ""aashush"", ""sha"": ""c6efb65ffd7881340a13c4c5c4638513423e7cc1"", ""last_modified"": ""2024-09-11 04:29:27+00:00"", ""created_at"": ""2024-09-10 10:37:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""pytorch"", ""safetensors"", ""gemma"", ""en"", ""dataset:NousResearch/hermes-function-calling-v1"", ""base_model:lllyasviel/flux1-dev-bnb-nf4"", ""base_model:finetune:lllyasviel/flux1-dev-bnb-nf4"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: lllyasviel/flux1-dev-bnb-nf4\ndatasets:\n- NousResearch/hermes-function-calling-v1\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- charcut_mt"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""GemmaForCausalLM""], ""model_type"": ""gemma"", ""tokenizer_config"": {""bos_token"": ""<bos>"", ""chat_template"": ""{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"", ""eos_token"": ""<eos>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2506172416}, ""total"": 2506172416}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-11 04:29:27+00:00"", ""cardData"": ""base_model: lllyasviel/flux1-dev-bnb-nf4\ndatasets:\n- NousResearch/hermes-function-calling-v1\nlanguage:\n- en\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- charcut_mt"", ""transformersInfo"": null, ""_id"": ""66e0215c0b02714871494a8e"", ""modelId"": ""aashush/quantized-local-llm-int8"", ""usedStorage"": 14645905703}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aashush/quantized-local-llm-int8&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baashush%2Fquantized-local-llm-int8%5D(%2Faashush%2Fquantized-local-llm-int8)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
gemma-2B-10M_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ mustafaaljadery/gemma-2B-10M,"---
3
+ license: mit
4
+ ---
5
+ # Gemma 2B - 10M Context
6
+
7
+ Gemma 2B with recurrent local attention with context length of up to 10M. Our implemenation uses **<32GB** of memory!
8
+
9
+ ![Graphic of our implementation context](./graphic.png)
10
+
11
+ **Features:**
12
+
13
+ - 10M sequence length on Gemma 2B.
14
+ - Runs on less than 32GB of memory.
15
+ - Native inference optimized for cuda.
16
+ - Recurrent local attention for O(N) memory.
17
+
18
+ ## Quick Start
19
+
20
+ > **Note:** This is a very early checkpoint of the model. Only 200 steps. We plan on training for a lot more tokens!
21
+
22
+ Install the model from huggingface - [Huggingface Model](https://huggingface.co/mustafaaljadery/gemma-10M-safetensor).
23
+
24
+ ```bash
25
+ python main.py
26
+ ```
27
+
28
+ Change the `main.py` inference code to the specific prompt you desire.
29
+
30
+ ```python
31
+ model_path = ""./models/gemma-2b-10m""
32
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
33
+ model = GemmaForCausalLM.from_pretrained(
34
+ model_path,
35
+ torch_dtype=torch.bfloat16
36
+ )
37
+
38
+ prompt_text = ""Summarize this harry potter book...""
39
+
40
+ with torch.no_grad():
41
+ generated_text = generate(
42
+ model, tokenizer, prompt_text, max_length=512, temperature=0.8
43
+ )
44
+
45
+ print(generated_text)
46
+ ```
47
+
48
+ ## How does this work?
49
+
50
+ The largest bottleneck (in terms of memory) for LLMs is the KV cache. It grows quadratically in vanilla multi-head attention, thus limiting the size of your sequence length.
51
+
52
+ Our approach splits the attention in local attention blocks as outlined by [InfiniAttention](https://arxiv.org/abs/2404.07143). We take those local attention blocks and apply recurrance to the local attention blocks for the final result of 10M context global atention.
53
+
54
+ A lot of the inspiration for our ideas comes from the [Transformer-XL](https://arxiv.org/abs/1901.02860) paper.
55
+
56
+ ## Credits
57
+
58
+ This was built by:
59
+
60
+ - [Mustafa Aljadery](https://www.maxaljadery.com/)
61
+ - [Siddharth Sharma](https://stanford.edu/~sidshr/)
62
+ - [Aksh Garg](https://www.linkedin.com/in/aksh-garg/)
63
+ ","{""id"": ""mustafaaljadery/gemma-2B-10M"", ""author"": ""mustafaaljadery"", ""sha"": ""3861cc6888575257e7065345b74805744faa7f97"", ""last_modified"": ""2024-05-09 17:24:35+00:00"", ""created_at"": ""2024-05-07 04:54:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 69, ""downloads_all_time"": null, ""likes"": 227, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""arxiv:2404.07143"", ""arxiv:1901.02860"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='graphic.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""archit11/gemma-10m""], ""safetensors"": {""parameters"": {""F32"": 2506172560}, ""total"": 2506172560}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-09 17:24:35+00:00"", ""cardData"": ""license: mit"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6639b4068eb8c9c62ac5f2f5"", ""modelId"": ""mustafaaljadery/gemma-2B-10M"", ""usedStorage"": 10043577587}",0,"https://huggingface.co/chihhh/Attack-techniques-full-gemma-2b, https://huggingface.co/chihhh/attack-gemma-2b-1, https://huggingface.co/chihhh/attack-gemma-2, https://huggingface.co/hasnatz/gemma2b10M120, https://huggingface.co/not-lain/Quotes_Generator",5,https://huggingface.co/chihhh/Attack-techniques-Lora-gemma,1,,0,,0,"archit11/gemma-10m, huggingface/InferenceSupport/discussions/new?title=mustafaaljadery/gemma-2B-10M&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmustafaaljadery%2Fgemma-2B-10M%5D(%2Fmustafaaljadery%2Fgemma-2B-10M)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
64
+ chihhh/Attack-techniques-full-gemma-2b,"---
65
+ license: mit
66
+ base_model: mustafaaljadery/gemma-2B-10M
67
+ tags:
68
+ - trl
69
+ - sft
70
+ - generated_from_trainer
71
+ model-index:
72
+ - name: Attack-techniques
73
+ results: []
74
+ ---
75
+
76
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
77
+ should probably proofread and complete it, then remove this comment. -->
78
+
79
+ # Attack-techniques
80
+
81
+ This model is a fine-tuned version of [mustafaaljadery/gemma-2B-10M](https://huggingface.co/mustafaaljadery/gemma-2B-10M) on an unknown dataset.
82
+
83
+ ## Model description
84
+
85
+ More information needed
86
+
87
+ ## Intended uses & limitations
88
+
89
+ More information needed
90
+
91
+ ## Training and evaluation data
92
+
93
+ More information needed
94
+
95
+ ## Training procedure
96
+
97
+ ### Training hyperparameters
98
+
99
+ The following hyperparameters were used during training:
100
+ - learning_rate: 0.0002
101
+ - train_batch_size: 1
102
+ - eval_batch_size: 8
103
+ - seed: 42
104
+ - gradient_accumulation_steps: 4
105
+ - total_train_batch_size: 4
106
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
107
+ - lr_scheduler_type: constant
108
+ - lr_scheduler_warmup_ratio: 0.03
109
+ - num_epochs: 10
110
+
111
+ ### Training results
112
+
113
+
114
+
115
+ ### Framework versions
116
+
117
+ - Transformers 4.40.2
118
+ - Pytorch 2.1.1+cu121
119
+ - Datasets 2.19.1
120
+ - Tokenizers 0.19.1
121
+ ","{""id"": ""chihhh/Attack-techniques-full-gemma-2b"", ""author"": ""chihhh"", ""sha"": ""5075d6198494e694fcba52ff4569cc29a691989f"", ""last_modified"": ""2024-05-17 06:21:12+00:00"", ""created_at"": ""2024-05-17 06:04:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""gemma"", ""text-generation"", ""trl"", ""sft"", ""generated_from_trainer"", ""base_model:mustafaaljadery/gemma-2B-10M"", ""base_model:finetune:mustafaaljadery/gemma-2B-10M"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: Attack-techniques\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""Attack-techniques"", ""results"": []}], ""config"": {""architectures"": [""GemmaForCausalLM""], ""model_type"": ""gemma"", ""tokenizer_config"": {""bos_token"": ""<bos>"", ""eos_token"": ""<eos>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/May17_06-11-21_721a644765d9/events.out.tfevents.1715926315.721a644765d9', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2506172416}, ""total"": 2506172416}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-17 06:21:12+00:00"", ""cardData"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: Attack-techniques\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6646f374288f15df0e6e5ab7"", ""modelId"": ""chihhh/Attack-techniques-full-gemma-2b"", ""usedStorage"": 
10042238761}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=chihhh/Attack-techniques-full-gemma-2b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bchihhh%2FAttack-techniques-full-gemma-2b%5D(%2Fchihhh%2FAttack-techniques-full-gemma-2b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
122
+ chihhh/attack-gemma-2b-1,"---
123
+ license: mit
124
+ base_model: mustafaaljadery/gemma-2B-10M
125
+ tags:
126
+ - trl
127
+ - sft
128
+ - generated_from_trainer
129
+ model-index:
130
+ - name: attack-gemma
131
+ results: []
132
+ ---
133
+
134
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
135
+ should probably proofread and complete it, then remove this comment. -->
136
+
137
+ # attack-gemma
138
+
139
+ This model is a fine-tuned version of [mustafaaljadery/gemma-2B-10M](https://huggingface.co/mustafaaljadery/gemma-2B-10M) on an unknown dataset.
140
+
141
+ ## Model description
142
+
143
+ More information needed
144
+
145
+ ## Intended uses & limitations
146
+
147
+ More information needed
148
+
149
+ ## Training and evaluation data
150
+
151
+ More information needed
152
+
153
+ ## Training procedure
154
+
155
+ ### Training hyperparameters
156
+
157
+ The following hyperparameters were used during training:
158
+ - learning_rate: 0.0002
159
+ - train_batch_size: 1
160
+ - eval_batch_size: 8
161
+ - seed: 42
162
+ - gradient_accumulation_steps: 4
163
+ - total_train_batch_size: 4
164
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
165
+ - lr_scheduler_type: constant
166
+ - lr_scheduler_warmup_ratio: 0.03
167
+ - num_epochs: 10
168
+
169
+ ### Training results
170
+
171
+
172
+
173
+ ### Framework versions
174
+
175
+ - Transformers 4.41.1
176
+ - Pytorch 2.1.0+cu118
177
+ - Datasets 2.19.1
178
+ - Tokenizers 0.19.1
179
+ ","{""id"": ""chihhh/attack-gemma-2b-1"", ""author"": ""chihhh"", ""sha"": ""e42435ba25e75147f4db53344e5915a411557845"", ""last_modified"": ""2024-05-30 10:52:25+00:00"", ""created_at"": ""2024-05-30 10:48:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""gemma"", ""text-generation"", ""trl"", ""sft"", ""generated_from_trainer"", ""base_model:mustafaaljadery/gemma-2B-10M"", ""base_model:finetune:mustafaaljadery/gemma-2B-10M"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: attack-gemma\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""attack-gemma"", ""results"": []}], ""config"": {""architectures"": [""GemmaForCausalLM""], ""model_type"": ""gemma"", ""tokenizer_config"": {""bos_token"": ""<bos>"", ""eos_token"": ""<eos>"", ""pad_token"": ""<eos>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/May30_10-45-56_a1bb51412741/events.out.tfevents.1717065966.a1bb51412741', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2506172416}, ""total"": 2506172416}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-30 10:52:25+00:00"", ""cardData"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: attack-gemma\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6658598b0525ce56fab6364c"", ""modelId"": ""chihhh/attack-gemma-2b-1"", ""usedStorage"": 
10042237881}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=chihhh/attack-gemma-2b-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bchihhh%2Fattack-gemma-2b-1%5D(%2Fchihhh%2Fattack-gemma-2b-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
180
+ chihhh/attack-gemma-2,"---
181
+ license: mit
182
+ base_model: mustafaaljadery/gemma-2B-10M
183
+ tags:
184
+ - trl
185
+ - sft
186
+ - generated_from_trainer
187
+ model-index:
188
+ - name: attack-gemma-7b
189
+ results: []
190
+ ---
191
+
192
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
193
+ should probably proofread and complete it, then remove this comment. -->
194
+
195
+ # attack-gemma-7b
196
+
197
+ This model is a fine-tuned version of [mustafaaljadery/gemma-2B-10M](https://huggingface.co/mustafaaljadery/gemma-2B-10M) on an unknown dataset.
198
+
199
+ ## Model description
200
+
201
+ More information needed
202
+
203
+ ## Intended uses & limitations
204
+
205
+ More information needed
206
+
207
+ ## Training and evaluation data
208
+
209
+ More information needed
210
+
211
+ ## Training procedure
212
+
213
+ ### Training hyperparameters
214
+
215
+ The following hyperparameters were used during training:
216
+ - learning_rate: 0.0002
217
+ - train_batch_size: 1
218
+ - eval_batch_size: 8
219
+ - seed: 42
220
+ - gradient_accumulation_steps: 4
221
+ - total_train_batch_size: 4
222
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
223
+ - lr_scheduler_type: constant
224
+ - lr_scheduler_warmup_ratio: 0.03
225
+ - num_epochs: 5
226
+
227
+ ### Training results
228
+
229
+
230
+
231
+ ### Framework versions
232
+
233
+ - Transformers 4.41.2
234
+ - Pytorch 2.1.0+cu118
235
+ - Datasets 2.19.1
236
+ - Tokenizers 0.19.1
237
+ ","{""id"": ""chihhh/attack-gemma-2"", ""author"": ""chihhh"", ""sha"": ""79328113dbb50bc7c0bef0a5b21b31ec40eca745"", ""last_modified"": ""2024-05-31 08:03:00+00:00"", ""created_at"": ""2024-05-31 07:51:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""gemma"", ""text-generation"", ""trl"", ""sft"", ""generated_from_trainer"", ""base_model:mustafaaljadery/gemma-2B-10M"", ""base_model:finetune:mustafaaljadery/gemma-2B-10M"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: attack-gemma-7b\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""attack-gemma-7b"", ""results"": []}], ""config"": {""architectures"": [""GemmaForCausalLM""], ""model_type"": ""gemma"", ""tokenizer_config"": {""bos_token"": ""<bos>"", ""eos_token"": ""<eos>"", ""pad_token"": ""<eos>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/May31_07-41-14_5e1508d132c7/events.out.tfevents.1717141351.5e1508d132c7', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2506172416}, ""total"": 2506172416}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-31 08:03:00+00:00"", ""cardData"": ""base_model: mustafaaljadery/gemma-2B-10M\nlicense: mit\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: attack-gemma-7b\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6659816e5e5012e5dbb9783b"", ""modelId"": ""chihhh/attack-gemma-2"", ""usedStorage"": 
10042238510}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=chihhh/attack-gemma-2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bchihhh%2Fattack-gemma-2%5D(%2Fchihhh%2Fattack-gemma-2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
238
+ hasnatz/gemma2b10M120,"---
239
+ base_model: mustafaaljadery/gemma-2B-10M
240
+ language:
241
+ - en
242
+ license: apache-2.0
243
+ tags:
244
+ - text-generation-inference
245
+ - transformers
246
+ - unsloth
247
+ - gemma
248
+ - trl
249
+ ---
250
+
251
+ # Uploaded model
252
+
253
+ - **Developed by:** hasnatz
254
+ - **License:** apache-2.0
255
+ - **Finetuned from model :** mustafaaljadery/gemma-2B-10M
256
+
257
+ This gemma model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
258
+
259
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
260
+ ","{""id"": ""hasnatz/gemma2b10M120"", ""author"": ""hasnatz"", ""sha"": ""e4c72e5c22b99e928a6eea632f0b5e14b5450032"", ""last_modified"": ""2024-09-30 18:36:55+00:00"", ""created_at"": ""2024-09-30 18:36:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""gemma"", ""trl"", ""en"", ""base_model:mustafaaljadery/gemma-2B-10M"", ""base_model:finetune:mustafaaljadery/gemma-2B-10M"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mustafaaljadery/gemma-2B-10M\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- gemma\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-30 18:36:55+00:00"", ""cardData"": ""base_model: mustafaaljadery/gemma-2B-10M\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- gemma\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": 
""66faefbfa895b39ccdb10eac"", ""modelId"": ""hasnatz/gemma2b10M120"", ""usedStorage"": 78480072}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=hasnatz/gemma2b10M120&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhasnatz%2Fgemma2b10M120%5D(%2Fhasnatz%2Fgemma2b10M120)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
261
+ not-lain/Quotes_Generator,"---
262
+ base_model: mustafaaljadery/gemma-2B-10M
263
+ library_name: transformers
264
+ model_name: Quotes_Generator
265
+ tags:
266
+ - generated_from_trainer
267
+ - trl
268
+ - sft
269
+ licence: license
270
+ ---
271
+
272
+ # Model Card for Quotes_Generator
273
+
274
+ This model is a fine-tuned version of [mustafaaljadery/gemma-2B-10M](https://huggingface.co/mustafaaljadery/gemma-2B-10M).
275
+ It has been trained using [TRL](https://github.com/huggingface/trl).
276
+
277
+ ## Quick start
278
+
279
+ ```python
280
+ from transformers import pipeline
281
+
282
+ question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
283
+ generator = pipeline(""text-generation"", model=""not-lain/Quotes_Generator"", device=""cuda"")
284
+ output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
285
+ print(output[""generated_text""])
286
+ ```
287
+
288
+ ## Training procedure
289
+
290
+ [<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""150"" height=""24""/>](https://wandb.ai/not-lain/huggingface/runs/gxswt71k)
291
+
292
+ This model was trained with SFT.
293
+
294
+ ### Framework versions
295
+
296
+ - TRL: 0.12.2
297
+ - Transformers: 4.46.3
298
+ - Pytorch: 2.5.1+cu121
299
+ - Datasets: 3.2.0
300
+ - Tokenizers: 0.20.3
301
+
302
+ ## Citations
303
+
304
+
305
+
306
+ Cite TRL as:
307
+
308
+ ```bibtex
309
+ @misc{vonwerra2022trl,
310
+ title = {{TRL: Transformer Reinforcement Learning}},
311
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
312
+ year = 2020,
313
+ journal = {GitHub repository},
314
+ publisher = {GitHub},
315
+ howpublished = {\url{https://github.com/huggingface/trl}}
316
+ }
317
+ ```","{""id"": ""not-lain/Quotes_Generator"", ""author"": ""not-lain"", ""sha"": ""509e6236f345803e1a827f2d385b1f66f130c49d"", ""last_modified"": ""2024-12-15 19:26:22+00:00"", ""created_at"": ""2024-12-15 19:17:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""generated_from_trainer"", ""trl"", ""sft"", ""base_model:mustafaaljadery/gemma-2B-10M"", ""base_model:finetune:mustafaaljadery/gemma-2B-10M"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mustafaaljadery/gemma-2B-10M\nlibrary_name: transformers\nmodel_name: Quotes_Generator\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<bos>"", ""eos_token"": ""<eos>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Dec15_19-16-37_91a78926977d/events.out.tfevents.1734290276.91a78926977d.173.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-15 19:26:22+00:00"", ""cardData"": ""base_model: mustafaaljadery/gemma-2B-10M\nlibrary_name: transformers\nmodel_name: Quotes_Generator\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""675f2b6319296a9b56bb1356"", ""modelId"": ""not-lain/Quotes_Generator"", ""usedStorage"": 108001184}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=not-lain/Quotes_Generator&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnot-lain%2FQuotes_Generator%5D(%2Fnot-lain%2FQuotes_Generator)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
gorilla-openfunctions-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ gorilla-llm/gorilla-openfunctions-v2,"---
3
+ license: apache-2.0
4
+ ---
5
+
6
+ # Gorilla OpenFunctions v2
7
+
8
+ 💡 SoTA for open-source models. On-par with GPT-4.
9
+
10
+ 🚀 Check out the [Berkeley Function Calling Leaderboard](https://gorilla.cs.berkeley.edu/leaderboard)
11
+ 📣 Read more in our [OpenFunctions v2 release blog](https://gorilla.cs.berkeley.edu/blogs/7_open_functions_v2.html) and [Berkeley Function Calling Leaderboard blog](https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html) \
12
+ 🟢 Check out Quantized GGUF models in [gorilla-llm/gorilla-openfunctions-v2-gguf](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v2-gguf)
13
+
14
+ ## Introduction
15
+ Gorilla OpenFunctions extends Large Language Model(LLM) Chat Completion feature to formulate
16
+ executable APIs call given natural language instructions and API context. With OpenFunctions v2,
17
+ we now support:
18
+ 1. Multiple functions - choose betwen functions
19
+ 2. Parallel functions - call the same function `N` time with different parameter values
20
+ 3. Multiple & parallel - both of the above in a single chatcompletion call (one generation)
21
+ 4. Relevance detection - when chatting, chat. When asked for function, returns a function
22
+ 5. Python - supports `string, number, boolean, list, tuple, dict` parameter datatypes and `Any` for those not natively supported.
23
+ 6. JAVA - support for `byte, short, int, float, double, long, boolean, char, Array, ArrayList, Set, HashMap, Hashtable, Queue, Stack, and Any` datatypes.
24
+ 7. JavaScript - support for `String, Number, Bigint, Boolean, dict (object), Array, Date, and Any` datatypes.
25
+ 8. REST - native REST support
26
+
27
+
28
+ ## Performance
29
+
30
+ | Model | Overall Accuracy* |
31
+ |---|---|
32
+ |GPT-4-0125-Preview | 85.12% |
33
+ |Gorilla-openfunctions-v2 | 83.67% |
34
+ |GPT-3.5-turbo | 82.23% |
35
+ |Mistral-medium | 79.70% |
36
+ |Nexusflow Raven-v2 | 55.72% |
37
+ |GPT-4-0613 | 54.16% |
38
+ *: Overall Accuracy is defined in [Berkeley Function Calling Leaderboard blog](https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html), read more details if you are interested!
39
+
40
+ ## Models Available
41
+ |Model | Functionality|
42
+ |---|---|
43
+ |gorilla-openfunctions-v2 | Multiple, parallel, multiple & parallel, relevance detection, Python + JAVA + JS + REST|
44
+ |gorilla-openfunctions-v1 | Parallel functions, and can choose between functions|
45
+ |gorilla-openfunctions-v0 | Given a function, and user intent, returns properly formatted json with the right arguments|
46
+
47
+ All of our models are hosted on our Huggingface UC Berkeley gorilla-llm org: [gorilla-openfunctions-v2](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v2), [gorilla-openfunctions-v1](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v1), and [gorilla-openfunctions-v0](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v0).
48
+
49
+ ## Training
50
+
51
+ Gorilla Openfunctions v2 is a 7B parameter model, and is built on top of the [deepseek coder](https://huggingface.co/deepseek-ai/deepseek-coder-7b-instruct-v1.5) LLM. Check out [openfunctions-v2 blog](https://gorilla.cs.berkeley.edu/blogs/7_open_functions_v2.html) to learn more about the data composition and some insights into the training process.
52
+
53
+
54
+
55
+ ## Example Usage (Hosted)
56
+
57
+ Please reference `README.md` in https://github.com/ShishirPatil/gorilla/tree/main/openfunctions for file dependencies and used utils.
58
+
59
+ 1. OpenFunctions is compatible with OpenAI Functions
60
+
61
+ ```bash
62
+ !pip install openai==0.28.1
63
+ ```
64
+
65
+ 2. Point to Gorilla hosted servers
66
+
67
+ ```python
68
+ import openai
69
+
70
+ def get_gorilla_response(prompt=""Call me an Uber ride type \""Plus\"" in Berkeley at zipcode 94704 in 10 minutes"", model=""gorilla-openfunctions-v0"", functions=[]):
71
+ openai.api_key = ""EMPTY""
72
+ openai.api_base = ""http://luigi.millennium.berkeley.edu:8000/v1""
73
+ try:
74
+ completion = openai.ChatCompletion.create(
75
+ model=""gorilla-openfunctions-v2"",
76
+ temperature=0.0,
77
+ messages=[{""role"": ""user"", ""content"": prompt}],
78
+ functions=functions,
79
+ )
80
+ return completion.choices[0]
81
+ except Exception as e:
82
+ print(e, model, prompt)
83
+ ```
84
+
85
+ 3. Pass the user argument and set of functions, Gorilla OpenFunctions returns a fully formatted json
86
+
87
+ ```python
88
+ query = ""What's the weather like in the two cities of Boston and San Francisco?""
89
+ functions = [
90
+ {
91
+ ""name"": ""get_current_weather"",
92
+ ""description"": ""Get the current weather in a given location"",
93
+ ""parameters"": {
94
+ ""type"": ""object"",
95
+ ""properties"": {
96
+ ""location"": {
97
+ ""type"": ""string"",
98
+ ""description"": ""The city and state, e.g. San Francisco, CA"",
99
+ },
100
+ ""unit"": {""type"": ""string"", ""enum"": [""celsius"", ""fahrenheit""]},
101
+ },
102
+ ""required"": [""location""],
103
+ },
104
+ }
105
+ ]
106
+ get_gorilla_response(query, functions=functions)
107
+ ```
108
+
109
+ 4. Expected output **NEW**
110
+
111
+ Gorilla returns a readily accessible string **AND** Open-AI compatible JSON.
112
+
113
+ ```python
114
+ {
115
+ ""index"": 0,
116
+ ""message"": {
117
+ ""role"": ""assistant"",
118
+ ""content"": ""get_current_weather(location='Boston, MA'), get_current_weather(location='San Francisco, CA')"",
119
+ ""function_call"": [
120
+ {
121
+ ""name"": ""get_current_weather"",
122
+ ""arguments"": {
123
+ ""location"": ""Boston, MA""
124
+ }
125
+ },
126
+ {
127
+ ""name"": ""get_current_weather"",
128
+ ""arguments"": {
129
+ ""location"": ""San Francisco, CA""
130
+ }
131
+ }
132
+ ]
133
+ },
134
+ ""finish_reason"": ""stop""
135
+ }
136
+
137
+ ```
138
+
139
+ We have retained the string functionality that our community loved from OpenFunctions v1 `get_current_weather(location='Boston, MA'), get_current_weather(location='San Francisco, CA')` above. And Notice the `function_call` key in the JSON to be OpenAI compatible.
140
+
141
+
142
+ This is possible in OpenFunctions v2, because we ensure that the output includes the name of the argument and not just the value. This enables us to parse the output into a JSON. In those scenarios where the output is not parsable into JSON, we will always return the function call string.
143
+
144
+ ### End to End Example
145
+
146
+ Run the example code in `[inference_hosted.py](https://github.com/ShishirPatil/gorilla/tree/main/openfunctions)` to see how the model works.
147
+
148
+ ```bash
149
+ python inference_hosted.py
150
+ ```
151
+
152
+ Expected Output:
153
+
154
+ ```bash
155
+ (.py3) shishir@dhcp-132-64:~/Work/Gorilla/openfunctions/$ python inference_hosted.py
156
+ --------------------
157
+ Function call strings(s): get_current_weather(location='Boston, MA'), get_current_weather(location='San Francisco, CA')
158
+ --------------------
159
+ OpenAI compatible `function_call`: [<OpenAIObject at 0x1139ba890> JSON:
160
+ {
161
+ ""name"": ""get_current_weather"",
162
+ ""arguments"":
163
+ {
164
+ ""location"": ""Boston, MA""
165
+ }
166
+ }, <OpenAIObject at 0x1139ba930> JSON: {
167
+ ""name"": ""get_current_weather"",
168
+ ""arguments"":
169
+ {
170
+ ""location"": ""San Francisco, CA""
171
+ }
172
+ }]
173
+ ```
174
+
175
+
176
+ ## Running OpenFunctions Locally
177
+
178
+ If you want to Run OpenFunctions locally, here is the prompt format that we used:
179
+
180
+ ```python
181
+ def get_prompt(user_query: str, functions: list = []) -> str:
182
+ """"""
183
+ Generates a conversation prompt based on the user's query and a list of functions.
184
+
185
+ Parameters:
186
+ - user_query (str): The user's query.
187
+ - functions (list): A list of functions to include in the prompt.
188
+
189
+ Returns:
190
+ - str: The formatted conversation prompt.
191
+ """"""
192
+ system = ""You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.""
193
+ if len(functions) == 0:
194
+ return f""{system}\n### Instruction: <<question>> {user_query}\n### Response: ""
195
+ functions_string = json.dumps(functions)
196
+ return f""{system}\n### Instruction: <<function>>{functions_string}\n<<question>>{user_query}\n### Response: ""
197
+ ```
198
+
199
+ Further, here is how we format the response:
200
+
201
+ Install the dependencies with:
202
+
203
+ ```bash
204
+ pip3 install tree_sitter
205
+ git clone https://github.com/tree-sitter/tree-sitter-java.git
206
+ git clone https://github.com/tree-sitter/tree-sitter-javascript.git
207
+ ```
208
+
209
+ And you can use the following code to format the response:
210
+
211
+ ```python
212
+
213
+ from openfunctions_utils import strip_function_calls, parse_function_call
214
+
215
+ def format_response(response: str):
216
+ """"""
217
+ Formats the response from the OpenFunctions model.
218
+
219
+ Parameters:
220
+ - response (str): The response generated by the LLM.
221
+
222
+ Returns:
223
+ - str: The formatted response.
224
+ - dict: The function call(s) extracted from the response.
225
+
226
+ """"""
227
+ function_call_dicts = None
228
+ try:
229
+ response = strip_function_calls(response)
230
+ # Parallel function calls returned as a str, list[dict]
231
+ if len(response) > 1:
232
+ function_call_dicts = []
233
+ for function_call in response:
234
+ function_call_dicts.append(parse_function_call(function_call))
235
+ response = "", "".join(response)
236
+ # Single function call returned as a str, dict
237
+ else:
238
+ function_call_dicts = parse_function_call(response[0])
239
+ response = response[0]
240
+ except Exception as e:
241
+ # Just faithfully return the generated response str to the user
242
+ pass
243
+ return response, function_call_dicts
244
+
245
+ ```
246
+
247
+ In the current directory, run the example code in `inference_local.py` to see how the model works.
248
+
249
+ ```bash
250
+ python inference_local.py
251
+ ```
252
+
253
+ **Note:** Use the `get_prompt` and `format_response` only if you are hosting it Locally. If you are using the Berkeley hosted models through the Chat-completion API, we do this in the backend, so you don't have to do this. The model is supported in Hugging Face 🤗 Transformers and can be run up locally:
254
+
255
+
256
+ ## License
257
+
258
+ Gorilla OpenFunctions v2 is distributed under the Apache 2.0 license. This software incorporates elements from the Deepseek model. Consequently, the licensing of Gorilla OpenFunctions v2 adheres to the Apache 2.0 license, with additional terms as outlined in [Appendix A](https://github.com/deepseek-ai/DeepSeek-LLM/blob/6712a86bfb7dd25c73383c5ad2eb7a8db540258b/LICENSE-MODEL) of the Deepseek license.
259
+
260
+
261
+ ## Contributing
262
+
263
+ Gorilla is an open source effort from UC Berkeley and we welcome contributors.
264
+ Please email us your comments, criticism, and questions. More information about the project can be found at [https://gorilla.cs.berkeley.edu/](https://gorilla.cs.berkeley.edu/)
265
+
266
+ ","{""id"": ""gorilla-llm/gorilla-openfunctions-v2"", ""author"": ""gorilla-llm"", ""sha"": ""1f6ac3b8bb09e347a875f5ff08cd66798706341e"", ""last_modified"": ""2024-04-18 02:39:42+00:00"", ""created_at"": ""2024-02-26 11:51:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3079, ""downloads_all_time"": null, ""likes"": 228, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""conversational"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"", ""eos_token"": ""<|EOT|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='zero_to_fp32.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""sanjay920/rubra-v0.1-function-calling""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-18 02:39:42+00:00"", ""cardData"": ""license: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65dc7b466bd4865bb694eab6"", ""modelId"": ""gorilla-llm/gorilla-openfunctions-v2"", ""usedStorage"": 27641655714}",0,"https://huggingface.co/TuringsSolutions/mergekit-slerp-wjpnsbu, https://huggingface.co/aliencaocao/gorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2, https://huggingface.co/Praveen0209/results, https://huggingface.co/PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed",4,"https://huggingface.co/RuoxiL/style-treasury-from-gorilla, https://huggingface.co/RuoxiL/style-irs-gorrila, https://huggingface.co/RuoxiL/style-mixed-gorrila, https://huggingface.co/KevinLiuR/style-mixed-gorrila-complete, https://huggingface.co/Purusharth/gorilla-espro, https://huggingface.co/KevinLiuR/style-irs-gorrila-fewshot, https://huggingface.co/KevinLiuR/style-mixed-gorrila-fewshot",7,"https://huggingface.co/mlc-ai/gorilla-openfunctions-v2-q4f16_1-MLC, https://huggingface.co/CISCai/gorilla-openfunctions-v2-SOTA-GGUF, https://huggingface.co/mlc-ai/gorilla-openfunctions-v2-q4f32_1-MLC, https://huggingface.co/mradermacher/gorilla-openfunctions-v2-GGUF, https://huggingface.co/PrunaAI/gorilla-llm-gorilla-openfunctions-v2-bnb-4bit-smashed, https://huggingface.co/kobu2/JARVIS-8b-FUNCTIONCALLING-GGUF, https://huggingface.co/PrunaAI/gorilla-llm-gorilla-openfunctions-v2-AWQ-4bit-smashed, https://huggingface.co/tensorblock/gorilla-openfunctions-v2-GGUF, https://huggingface.co/smalinin/Llama-3.1-Storm-8B-q4f32_1-MLC, 
https://huggingface.co/smalinin/gorilla-openfunctions-v2_q4f32_1-MLC",10,,0,"huggingface/InferenceSupport/discussions/new?title=gorilla-llm/gorilla-openfunctions-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgorilla-llm%2Fgorilla-openfunctions-v2%5D(%2Fgorilla-llm%2Fgorilla-openfunctions-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sanjay920/rubra-v0.1-function-calling",2
267
+ TuringsSolutions/mergekit-slerp-wjpnsbu,"---
268
+ base_model:
269
+ - gorilla-llm/gorilla-openfunctions-v2
270
+ library_name: transformers
271
+ tags:
272
+ - mergekit
273
+ - merge
274
+
275
+ ---
276
+ # merge
277
+
278
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
279
+
280
+ ## Merge Details
281
+ ### Merge Method
282
+
283
+ This model was merged using the SLERP merge method.
284
+
285
+ ### Models Merged
286
+
287
+ The following models were included in the merge:
288
+ * [gorilla-llm/gorilla-openfunctions-v2](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v2)
289
+
290
+ ### Configuration
291
+
292
+ The following YAML configuration was used to produce this model:
293
+
294
+ ```yaml
295
+ models:
296
+ - model: gorilla-llm/gorilla-openfunctions-v2
297
+ - model: gorilla-llm/gorilla-openfunctions-v2
298
+ merge_method: slerp
299
+ base_model: gorilla-llm/gorilla-openfunctions-v2
300
+ dtype: bfloat16
301
+ parameters:
302
+ t: [0, 0.5, 1, 0.5, 0] # V shaped curve: Hermes for input & output, WizardMath in the middle layers
303
+
304
+ ```
305
+ ","{""id"": ""TuringsSolutions/mergekit-slerp-wjpnsbu"", ""author"": ""TuringsSolutions"", ""sha"": ""1654fcba6b47dd0f395d1dc37d6fb10b58941cc6"", ""last_modified"": ""2024-04-14 05:50:20+00:00"", ""created_at"": ""2024-04-14 05:48:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""base_model:gorilla-llm/gorilla-openfunctions-v2"", ""base_model:finetune:gorilla-llm/gorilla-openfunctions-v2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- gorilla-llm/gorilla-openfunctions-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"", ""eos_token"": ""<|EOT|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 6910365696}, ""total"": 6910365696}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-14 05:50:20+00:00"", ""cardData"": ""base_model:\n- 
gorilla-llm/gorilla-openfunctions-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""661b6e33b8a37b469c94bab8"", ""modelId"": ""TuringsSolutions/mergekit-slerp-wjpnsbu"", ""usedStorage"": 13820763176}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=TuringsSolutions/mergekit-slerp-wjpnsbu&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTuringsSolutions%2Fmergekit-slerp-wjpnsbu%5D(%2FTuringsSolutions%2Fmergekit-slerp-wjpnsbu)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
306
+ aliencaocao/gorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2,"---
307
+ language:
308
+ - en
309
+ license: apache-2.0
310
+ tags:
311
+ - text-generation-inference
312
+ - transformers
313
+ - unsloth
314
+ - llama
315
+ - trl
316
+ - sft
317
+ base_model: gorilla-llm/gorilla-openfunctions-v2
318
+ ---
319
+
320
+ # Uploaded model
321
+
322
+ - **Developed by:** aliencaocao
323
+ - **License:** apache-2.0
324
+ - **Finetuned from model :** gorilla-llm/gorilla-openfunctions-v2
325
+
326
+ This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
327
+
328
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
329
+ ","{""id"": ""aliencaocao/gorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2"", ""author"": ""aliencaocao"", ""sha"": ""2593b7eefa53da6e694d9e89db04ccec3640bfeb"", ""last_modified"": ""2024-05-16 16:34:08+00:00"", ""created_at"": ""2024-05-16 16:24:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""sft"", ""conversational"", ""en"", ""base_model:gorilla-llm/gorilla-openfunctions-v2"", ""base_model:finetune:gorilla-llm/gorilla-openfunctions-v2"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl\n- sft"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla 
LLM, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"", ""eos_token"": ""<|EOT|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 6910365696}, ""total"": 
6910365696}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-16 16:34:08+00:00"", ""cardData"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl\n- sft"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""664633567578c988e6f9aa42"", ""modelId"": ""aliencaocao/gorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2"", ""usedStorage"": 27641588718}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aliencaocao/gorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baliencaocao%2Fgorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2%5D(%2Faliencaocao%2Fgorilla-openfunctions-v2-TIL24-r16-a16-ctx768-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
330
+ Praveen0209/results,"---
331
+ license: apache-2.0
332
+ base_model: gorilla-llm/gorilla-openfunctions-v2
333
+ tags:
334
+ - generated_from_trainer
335
+ model-index:
336
+ - name: results
337
+ results: []
338
+ ---
339
+
340
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
341
+ should probably proofread and complete it, then remove this comment. -->
342
+
343
+ # results
344
+
345
+ This model is a fine-tuned version of [gorilla-llm/gorilla-openfunctions-v2](https://huggingface.co/gorilla-llm/gorilla-openfunctions-v2) on the None dataset.
346
+
347
+ ## Model description
348
+
349
+ More information needed
350
+
351
+ ## Intended uses & limitations
352
+
353
+ More information needed
354
+
355
+ ## Training and evaluation data
356
+
357
+ More information needed
358
+
359
+ ## Training procedure
360
+
361
+ ### Training hyperparameters
362
+
363
+ The following hyperparameters were used during training:
364
+ - learning_rate: 2e-05
365
+ - train_batch_size: 4
366
+ - eval_batch_size: 8
367
+ - seed: 42
368
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
369
+ - lr_scheduler_type: constant
370
+ - lr_scheduler_warmup_ratio: 0.03
371
+ - num_epochs: 4
372
+
373
+ ### Training results
374
+
375
+
376
+
377
+ ### Framework versions
378
+
379
+ - Transformers 4.33.1
380
+ - Pytorch 2.1.2
381
+ - Datasets 2.19.2
382
+ - Tokenizers 0.13.3
383
+ ","{""id"": ""Praveen0209/results"", ""author"": ""Praveen0209"", ""sha"": ""821d4b6ae9fd92a1c2a2a4b802d694e20341d8b7"", ""last_modified"": ""2024-06-21 07:35:54+00:00"", ""created_at"": ""2024-06-18 05:26:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:gorilla-llm/gorilla-openfunctions-v2"", ""base_model:finetune:gorilla-llm/gorilla-openfunctions-v2"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""results"", ""results"": []}], ""config"": {""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"", ""eos_token"": ""<|EOT|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": true}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-21 07:35:54+00:00"", ""cardData"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: []"", ""transformersInfo"": null, ""_id"": ""66711a962338201ae31dc983"", ""modelId"": ""Praveen0209/results"", ""usedStorage"": 
377631462}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Praveen0209/results&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPraveen0209%2Fresults%5D(%2FPraveen0209%2Fresults)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
384
+ PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed,"---
385
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
386
+ base_model: gorilla-llm/gorilla-openfunctions-v2
387
+ metrics:
388
+ - memory_disk
389
+ - memory_inference
390
+ - inference_latency
391
+ - inference_throughput
392
+ - inference_CO2_emissions
393
+ - inference_energy_consumption
394
+ tags:
395
+ - pruna-ai
396
+ ---
397
+ <!-- header start -->
398
+ <!-- 200823 -->
399
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
400
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
401
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
402
+ </a>
403
+ </div>
404
+ <!-- header end -->
405
+
406
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
407
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
408
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
409
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
410
+
411
+ # Simply make AI models cheaper, smaller, faster, and greener!
412
+
413
+ - Give a thumbs up if you like this model!
414
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
415
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
416
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
417
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
418
+
419
+ ## Results
420
+
421
+ ![image info](./plots.png)
422
+
423
+ **Frequently Asked Questions**
424
+ - ***How does the compression work?*** The model is compressed with quanto.
425
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
426
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
427
+ - ***What is the model format?*** We use safetensors.
428
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
429
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
430
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
431
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
432
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
433
+
434
+ ## Setup
435
+
436
+ You can run the smashed model with these steps:
437
+
438
+ 0. Check requirements from the original repo gorilla-llm/gorilla-openfunctions-v2 installed. In particular, check python, cuda, and transformers versions.
439
+ 1. Make sure that you have installed quantization related packages.
440
+ ```bash
441
+ pip install quanto
442
+ ```
443
+ 2. Load & run the model.
444
+ ```python
445
+ from transformers import AutoModelForCausalLM, AutoTokenizer
446
+ IMPORTS
447
+
448
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed"", trust_remote_code=True, device_map='auto')
449
+ tokenizer = AutoTokenizer.from_pretrained(""gorilla-llm/gorilla-openfunctions-v2"")
450
+
451
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
452
+
453
+ outputs = model.generate(input_ids, max_new_tokens=216)
454
+ tokenizer.decode(outputs[0])
455
+ ```
456
+
457
+ ## Configurations
458
+
459
+ The configuration info are in `smash_config.json`.
460
+
461
+ ## Credits & License
462
+
463
+ The license of the smashed model follows the license of the original model. Please check the license of the original model gorilla-llm/gorilla-openfunctions-v2 before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
464
+
465
+ ## Want to compress other models?
466
+
467
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
468
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""1ec4c9d2fcc13530188c168c72ac9f0c3fc781c6"", ""last_modified"": ""2024-07-19 09:23:23+00:00"", ""created_at"": ""2024-06-25 00:40:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:gorilla-llm/gorilla-openfunctions-v2"", ""base_model:finetune:gorilla-llm/gorilla-openfunctions-v2"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"", ""eos_token"": ""<|EOT|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:23:23+00:00"", ""cardData"": ""base_model: gorilla-llm/gorilla-openfunctions-v2\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", 
""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""667a11e1b3c1b9aac8c7fe95"", ""modelId"": ""PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed"", ""usedStorage"": 13883990022}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/gorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fgorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed%5D(%2FPrunaAI%2Fgorilla-llm-gorilla-openfunctions-v2-QUANTO-int8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
gte-small_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
instructor-large_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
The diff for this file is too large to render. See raw diff
 
internlm-xcomposer2d5-7b_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ internlm/internlm-xcomposer2d5-7b,"---
3
+ license: other
4
+ pipeline_tag: visual-question-answering
5
+ ---
6
+
7
+
8
+ <p align=""center"">
9
+ <img src=""logo_en.png"" width=""600""/>
10
+ <p>
11
+
12
+ <p align=""center"">
13
+ <b><font size=""6"">InternLM-XComposer-2.5</font></b>
14
+ <p>
15
+
16
+ <div align=""center"">
17
+
18
+ [💻Github Repo](https://github.com/InternLM/InternLM-XComposer)
19
+
20
+ [Online Demo](https://huggingface.co/spaces/Willow123/InternLM-XComposer)
21
+
22
+ [Paper](https://huggingface.co/papers/2407.03320)
23
+
24
+ </div>
25
+
26
+ **InternLM-XComposer2.5** excels in various text-image comprehension and composition applications, achieving GPT-4V level capabilities with merely 7B LLM backend. IXC2.5 is trained with 24K interleaved image-text contexts, it can seamlessly extend to 96K long contexts via RoPE extrapolation. This long-context capability allows IXC-2.5 to excel in tasks requiring extensive input and output contexts.
27
+
28
+
29
+ ### Import from Transformers
30
+ To load the InternLM-XComposer2-4KHD model using Transformers, use the following code:
31
+ ```python
32
+ import torch
33
+ from transformers import AutoTokenizer, AutoModelForCausalLM
34
+ ckpt_path = ""internlm/internlm-xcomposer2d5-7b""
35
+ tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True).cuda()
36
+ # Set `torch_dtype=torch.floatb16` to load model in bfloat16, otherwise it will be loaded as float32 and might cause OOM Error.
37
+ model = AutoModelForCausalLM.from_pretrained(ckpt_path, torch_dtype=torch.bfloat16, trust_remote_code=True).cuda()
38
+ model = model.eval()
39
+ ```
40
+
41
+ ## Quickstart
42
+
43
+ We provide a simple example to show how to use InternLM-XComposer2.5 with 🤗 Transformers.
44
+
45
+ <details>
46
+ <summary>
47
+ <b>Video Understanding</b>
48
+ </summary>
49
+
50
+ ```python
51
+ import torch
52
+ from transformers import AutoModel, AutoTokenizer
53
+
54
+ torch.set_grad_enabled(False)
55
+
56
+ # init model and tokenizer
57
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
58
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
59
+ model.tokenizer = tokenizer
60
+
61
+ query = 'Here are some frames of a video. Describe this video in detail'
62
+ image = ['./examples/liuxiang.mp4',]
63
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
64
+ response, his = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)
65
+ print(response)
66
+ #The video opens with a shot of an athlete, dressed in a red and yellow uniform with the word ""CHINA"" emblazoned across the front, preparing for a race.
67
+ #The athlete, Liu Xiang, is seen in a crouched position, focused and ready, with the Olympic rings visible in the background, indicating the prestigious setting of the Olympic Games. As the race commences, the athletes are seen sprinting towards the hurdles, their determination evident in their powerful strides.
68
+ #The camera captures the intensity of the competition, with the athletes' numbers and times displayed on the screen, providing a real-time update on their performance. The race reaches a climax as Liu Xiang, still in his red and yellow uniform, triumphantly crosses the finish line, his arms raised in victory.
69
+ #The crowd in the stands erupts into cheers, their excitement palpable as they witness the athlete's success. The video concludes with a close-up shot of Liu Xiang, still basking in the glory of his victory, as the Olympic rings continue to symbolize the significance of the event.
70
+
71
+ query = 'tell me the athlete code of Liu Xiang'
72
+ image = ['./examples/liuxiang.mp4',]
73
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
74
+ response, _ = model.chat(tokenizer, query, image, history=his, do_sample=False, num_beams=3, use_meta=True)
75
+ print(response)
76
+ #The athlete code of Liu Xiang, as displayed on his uniform in the video, is ""1363"".
77
+ ```
78
+
79
+ </details>
80
+
81
+ <details>
82
+ <summary>
83
+ <b>Multi-Image Mutli-Tune Dialog</b>
84
+ </summary>
85
+
86
+ ```python
87
+ import torch
88
+ from transformers import AutoModel, AutoTokenizer
89
+
90
+ torch.set_grad_enabled(False)
91
+
92
+ # init model and tokenizer
93
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
94
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
95
+ model.tokenizer = tokenizer
96
+
97
+ query = 'Image1 <ImageHere>; Image2 <ImageHere>; Image3 <ImageHere>; I want to buy a car from the three given cars, analyze their advantages and weaknesses one by one'
98
+ image = ['./examples/cars1.jpg',
99
+ './examples/cars2.jpg',
100
+ './examples/cars3.jpg',]
101
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
102
+ response, his = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)
103
+ print(response)
104
+ #To analyze the advantages and disadvantages of each car, we need to consider factors such as brand reputation, performance, design, cost, and maintenance.
105
+ #1. Mercedes-Benz: - Advantages: Known for its luxury and reliability, the Mercedes-Benz brand offers a high level of comfort, advanced technology, and superior craftsmanship. The vehicle in the image appears to be an SUV, which is versatile for both city driving and off-road conditions. - Disadvantages: Typically, Mercedes-Benz vehicles are more expensive compared to other brands, and they may require more frequent maintenance due to their luxury status.
106
+ #2. Bugatti: - Advantages: Bugatti is renowned for producing some of the fastest and most powerful cars in the world. The vehicle in the image is a sports car, likely offering an exhilarating driving experience with its high-performance engine and advanced aerodynamics. - Disadvantages: Bugatti cars are extremely expensive, making them less accessible to the average consumer. They also require specialized knowledge for maintenance and may not be suitable for everyday driving due to their high-performance nature.
107
+ #3. BMW: - Advantages: BMW is a well-respected manufacturer known for its innovative engineering and design. The car in the image appears to be an electric vehicle (EV), which offers advantages such as lower emissions, reduced noise pollution, and potentially lower operating costs. - Disadvantages: EVs like the one in the image may have a shorter range compared to traditional gasoline-powered vehicles, limiting their suitability for long-distance travel.
108
+ #Additionally, the initial cost of purchasing an EV can be higher than that of a conventional car. In conclusion, each car has its own set of advantages and disadvantages. The Mercedes-Benz offers luxury and versatility, the Bugatti provides high performance and exclusivity, and the BMW represents innovation in electric vehicle technology. The choice ultimately depends on the buyer's priorities, such as comfort, performance, environmental impact, and budget.
109
+
110
+ query = 'Image4 <ImageHere>; How about the car in Image4'
111
+ image.append('./examples/cars4.jpg')
112
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
113
+ response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, history= his, use_meta=True)
114
+ print(response)
115
+ #The car in Image4 is a red sports car, which appears to be a Ferrari. Ferrari is a renowned Italian manufacturer known for producing some of the most iconic and high-performance sports cars in the world. - Advantages: Ferrari vehicles are synonymous with speed, luxury, and engineering excellence.
116
+ #The car in the image likely offers an exhilarating driving experience with its powerful engine, advanced aerodynamics, and high-quality craftsmanship. The red color adds to the car's aesthetic appeal, making it stand out on the road. - Disadvantages: Ferrari cars are extremely expensive, making them less accessible to the average consumer.
117
+ #They also require specialized knowledge for maintenance and may not be suitable for everyday driving due to their high-performance nature. In conclusion, the Ferrari in Image4 represents a pinnacle of automotive engineering and design, offering unmatched performance and luxury.
118
+ #However, its high cost and specialized maintenance requirements make it less practical for everyday use compared to the other vehicles in the images.
119
+ ```
120
+
121
+
122
+ </details>
123
+
124
+ <details>
125
+ <summary>
126
+ <b>High Resolution Image Understanding</b>
127
+ </summary>
128
+
129
+ ```python
130
+ import torch
131
+ from transformers import AutoModel, AutoTokenizer
132
+
133
+ torch.set_grad_enabled(False)
134
+
135
+ # init model and tokenizer
136
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
137
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
138
+ model.tokenizer = tokenizer
139
+
140
+ query = 'Analyze the given image in a detail manner'
141
+ image = ['./examples/dubai.png']
142
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
143
+ response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)
144
+ print(response)
145
+ #The infographic is a visual representation of various facts about Dubai. It begins with a statement about Palm Jumeirah, highlighting it as the largest artificial island visible from space. It then provides a historical context, noting that in 1968, there were only a few cars in Dubai, contrasting this with the current figure of more than 1.5 million vehicles.
146
+ #The infographic also points out that Dubai has the world's largest Gold Chain, with 7 of the top 10 tallest hotels located there. Additionally, it mentions that the crime rate is near 0%, and the income tax rate is also 0%, with 20% of the world's total cranes operating in Dubai. Furthermore, it states that 17% of the population is Emirati, and 83% are immigrants.
147
+ #The Dubai Mall is highlighted as the largest shopping mall in the world, with 1200 stores. The infographic also notes that Dubai has no standard address system, with no zip codes, area codes, or postal services. It mentions that the Burj Khalifa is so tall that its residents on top floors need to wait longer to break fast during Ramadan.
148
+ #The infographic also includes information about Dubai's climate-controlled City, with the Royal Suite at Burj Al Arab costing $24,000 per night. Lastly, it notes that the net worth of the four listed billionaires is roughly equal to the GDP of Honduras.
149
+
150
+ ```
151
+
152
+ </details>
153
+
154
+
155
+ <details>
156
+ <summary>
157
+ <b>Instruction to Webpage</b>
158
+ </summary>
159
+
160
+ ```python
161
+ import torch
162
+ from transformers import AutoModel, AutoTokenizer
163
+
164
+ torch.set_grad_enabled(False)
165
+
166
+ # init model and tokenizer
167
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
168
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
169
+ model.tokenizer = tokenizer
170
+
171
+ query = 'A website for Research institutions. The name is Shanghai AI lab. Top Navigation Bar is blue.Below left, an image shows the logo of the lab. In the right, there is a passage of text below that describes the mission of the laboratory.There are several images to show the research projects of Shanghai AI lab.'
172
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
173
+ response = model.write_webpage(query, seed=202, task='Instruction-aware Webpage Generation', repetition_penalty=3.0)
174
+ print(response)
175
+ # see the Instruction-aware Webpage Generation.html
176
+ ```
177
+
178
+ See the [Instruction to Webpage](https://github.com/InternLM/InternLM-XComposer/blob/main/examples/Instruction-aware_Webpage_Generation.html) results here.
179
+ </details>
180
+
181
+ <details>
182
+ <summary>
183
+ <b>Resume to Webpage</b>
184
+ </summary>
185
+
186
+ ```python
187
+ import torch
188
+ from transformers import AutoModel, AutoTokenizer
189
+
190
+ torch.set_grad_enabled(False)
191
+
192
+ # init model and tokenizer
193
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
194
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
195
+ model.tokenizer = tokenizer
196
+
197
+ ## the input should be a resume in markdown format
198
+ query = './examples/resume.md'
199
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
200
+ response = model.resume_2_webpage(query, seed=202, repetition_penalty=3.0)
201
+ print(response)
202
+ ```
203
+ See the [Resume to Webpage](https://github.com/InternLM/InternLM-XComposer/blob/main/examples/Resume-to-Personal_Page.html) results here.
204
+
205
+
206
+ </details>
207
+
208
+
209
+ <details>
210
+ <summary>
211
+ <b>Screenshot to Webpage</b>
212
+ </summary>
213
+
214
+ ```python
215
+ import torch
216
+ from transformers import AutoModel, AutoTokenizer
217
+
218
+ torch.set_grad_enabled(False)
219
+
220
+ # init model and tokenizer
221
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
222
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
223
+ model.tokenizer = tokenizer
224
+
225
+ query = 'Generate the HTML code of this web image with Tailwind CSS.'
226
+ image = ['./examples/screenshot.jpg']
227
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
228
+ response = model.screen_2_webpage(query, image, seed=202, repetition_penalty=3.0)
229
+ print(response)
230
+ ```
231
+ See the [Screenshot to Webpage](https://github.com/InternLM/InternLM-XComposer/blob/main/examples/Screenshot-to-Webpage.html) results here.
232
+
233
+ </details>
234
+
235
+
236
+
237
+ <details>
238
+ <summary>
239
+ <b>Write Article</b>
240
+ </summary>
241
+
242
+ ```python
243
+ import torch
244
+ from transformers import AutoModel, AutoTokenizer
245
+
246
+ torch.set_grad_enabled(False)
247
+
248
+ # init model and tokenizer
249
+ model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
250
+ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
251
+ model.tokenizer = tokenizer
252
+
253
+ query = '阅读下面的材料,根据要求写作。 电影《长安三万里》的出现让人感慨,影片并未将重点全落在大唐风华上,也展现了恢弘气象的阴暗面,即旧门阀的资源垄断、朝政的日益衰败与青年才俊的壮志难酬。高适仕进无门,只能回乡>沉潜修行。李白虽得玉真公主举荐,擢入翰林,但他只是成为唐玄宗的御用文人,不能真正实现有益于朝政的志意。然而,片中高潮部分《将进酒》一节,人至中年、挂着肚腩的李白引众人乘仙鹤上天,一路从水面、瀑布飞升至银河进入仙>宫,李白狂奔着与仙人们碰杯,最后大家纵身飞向漩涡般的九重天。肉身的微贱、世路的“天生我材必有用,坎坷,拘不住精神的高蹈。“天生我材必有用,千金散尽还复来。” 古往今来,身处闲顿、遭受挫折、被病痛折磨,很多人都曾经历>了人生的“失意”,却反而成就了他们“诗意”的人生。对正在追求人生价值的当代青年来说,如何对待人生中的缺憾和困顿?诗意人生中又有怎样的自我坚守和自我认同?请结合“失意”与“诗意”这两个关键词写一篇文章。 要求:选准角度,确定>立意,明确文体,自拟标题;不要套作,不得抄袭;不得泄露个人信息;不少于 800 字。'
254
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
255
+ response = model.write_artical(query, seed=8192)
256
+ print(response)
257
+ #诗意人生,贵在坚守
258
+ #《菜根谭》有云:“闲时要有吃紧的心思,忙里要留吃闲工夫。”人生在世,总有失意之时,当面对缺憾和困顿,诗意地生活着才能为人生增添一抹亮色。何谓诗意地生活? 所谓诗意地生活,便是在于坚守本心、直面遗憾、超越自我,在失意中寻找人生价值。
259
+ #诗意地生活,需坚守本心,淡然处之。
260
+ #陶渊明曾执意辞去彭泽县令,归隐田园,“采菊东篱下,悠然见南山”,在山水间寄情自娱;王维面对仕途失意,终日沉醉于诗酒之中,“兴来每独往,胜事空自知”,在诗酒中闲逸自如;李白仕途不顺,被赐金放还,但他依旧豪气干云,“天生我才必有用,千金散尽还复来”,在失意中坦然豁达。坚守本心,便能在遭遇失意之时守住自己的精神家园,让生活充满诗意。反之,若不能坚守本心,而只是一味迎合世俗以求得升迁,那纵使身居高位,亦会丧失生活的乐趣。
261
+ #诗意地生活,需直面遗憾,超越自我。
262
+ #“西塞山前白鹭飞,桃花流水鳜鱼肥。青箬笠,绿柳枝,半斤酒,一纶丝。五湖四海皆如此,何妨到此处归。”白居易的《渔歌子》写出了多少人的愿望:没有权势纷扰,没有贫困凄凉,只有青山绿水、白鹭鸥鸟作伴,如此自由自在的生活令人神往。然而,白居易却并没有因此真的归隐山林,而是直面人生,超越自我,写下了一首首诗意而富有现实关怀的作品。如果白居易只顾逃避人生,那又怎会拥有“大弦嘈嘈如急雨,小弦切切如私语”的绝美比喻呢?如果白居易只顾归隐山林,那又怎会写出“此曲只应天上有,人间哪得配白居易”这样的诗句呢?
263
+ #诗意地生活,需直面遗憾,坚守本心。
264
+ #李文波患有渐冻症,医生说他活不过五年,但他没有因此放弃对音乐的热爱,而是与病魔作斗争,演奏出美妙的乐曲;孙家林自幼患有脑瘫,但他不甘于命运的捉弄,终成全国最美教师;史铁生饱受疾病折磨,但他仍能发出“我常常在我的心头清点,我有什么?”的叩问,并由此走上文学道路,为后世留下丰厚的文化遗产。这些人没有逃避,而是选择直面人生的缺憾,在坚守本心的同时超越自我,最终实现了自己的价值。
265
+ #诗意地生活,是于失意中坚守本心,于缺憾中超越自我。当面对人生的缺憾与挫折,坚守本心、超越自我的同时,也必将书写属于自己的辉煌篇章。
266
+ #愿你我都能诗意地生活着!
267
+
268
+ query = 'Please write a blog based on the title: French Pastries: A Sweet Indulgence'
269
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
270
+ response = model.write_artical(query, seed=8192)
271
+ print(response)
272
+ #French Pastries: A Sweet Indulgence
273
+ #The French are well known for their love of pastries, and it’s a love that is passed down through generations. When one visits France, they are treated to an assortment of baked goods that can range from the delicate macaron to the rich and decadent chocolate mousse. While there are many delicious types of pastries found in France, five stand out as being the most iconic. Each of these pastries has its own unique qualities that make it special.
274
+ #1. Croissant
275
+ #One of the most famous pastries from France is the croissant. It is a buttery, flaky pastry that is best enjoyed fresh from the bakery. The dough is laminated with butter, giving it its signature layers. Croissants are typically eaten for breakfast or brunch, often accompanied by coffee or hot chocolate.
276
+ #2. Macaron
277
+ #The macaron is a small, delicate French confection made from almond flour, powdered sugar, and egg whites. The macaron itself is sandwiched with a ganache or jam filling. They come in a variety of colors and flavors, making them a popular choice for both casual snacking and upscale desserts.
278
+ #3. Madeleine
279
+ #The madeleine is a small shell-shaped cake that is light and sponge-like. It is often flavored with lemon or orange zest and sometimes dipped in chocolate. Madeleines are perfect for an afternoon snack with tea or coffee.
280
+ #4. Éclair
281
+ #The éclair is a long, thin pastry filled with cream and topped with chocolate glaze. It is a classic French treat that is both sweet and satisfying. Éclairs can be found in bakeries all over France and are often enjoyed with a cup of hot chocolate.
282
+ #5. Tarte Tatin
283
+ #The tarte Tatin is an apple tart that is known for its caramelized apples and puff pastry crust. It is named after the Tatin sisters who created the recipe in the late 19th century. Tarte Tatin is best served warm with a scoop of vanilla ice cream.
284
+ #These pastries are just a few of the many delicious treats that France has to offer. Whether you are a seasoned traveler or a first-time visitor, indulging in French pastries is a must-do activity. So go ahead, treat yourself—you deserve it!
285
+ ```
286
+
287
+ </details>
288
+
289
+
290
+ ### Open Source License
291
+ The code is licensed under Apache-2.0, while model weights are fully open for academic research and also allow free commercial usage. To apply for a commercial license, please fill in the application form (English)/申请表(中文). For other questions or collaborations, please contact internlm@pjlab.org.cn.","{""id"": ""internlm/internlm-xcomposer2d5-7b"", ""author"": ""internlm"", ""sha"": ""4aa81f2bbf20a9ddd4137dfe847c142adf07b652"", ""last_modified"": ""2024-07-22 06:01:33+00:00"", ""created_at"": ""2024-07-02 10:54:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1612, ""downloads_all_time"": null, ""likes"": 203, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""internlm2"", ""feature-extraction"", ""visual-question-answering"", ""custom_code"", ""arxiv:2407.03320"", ""license:other"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\npipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""InternLMXComposer2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_internlm_xcomposer2.InternLMXcomposer2Config"", ""AutoModel"": ""modeling_internlm_xcomposer2.InternLMXComposer2ForCausalLM"", ""AutoModelForCausalLM"": ""modeling_internlm_xcomposer2.InternLMXComposer2ForCausalLM""}, ""model_type"": ""internlm2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": 
""modeling_internlm_xcomposer2.InternLMXComposer2ForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SimHei.ttf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='build_mlp.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_internlm_xcomposer2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/cars1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/cars2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/cars3.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/cars4.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/dubai.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/liuxiang.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/resume.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/screenshot.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/test.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ixc_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logo_en.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_internlm2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_internlm_xcomposer2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ManishThota/Super-Rapid-Annotator""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-22 06:01:33+00:00"", ""cardData"": ""license: other\npipeline_tag: visual-question-answering"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_internlm_xcomposer2.InternLMXComposer2ForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""6683dc558cd7b806585aa381"", ""modelId"": ""internlm/internlm-xcomposer2d5-7b"", ""usedStorage"": 44424770654}",0,,0,,0,,0,,0,"ManishThota/Super-Rapid-Annotator, Willow123/InternLM-XComposer, huggingface/InferenceSupport/discussions/new?title=internlm/internlm-xcomposer2d5-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Binternlm%2Finternlm-xcomposer2d5-7b%5D(%2Finternlm%2Finternlm-xcomposer2d5-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",3
jetmoe-8b_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ jetmoe/jetmoe-8b,"---
3
+ license: apache-2.0
4
+ ---
5
+
6
+ <div align=""center"">
7
+ <div>&nbsp;</div>
8
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/ieHnwuczidNNoGRA_FN2y.png"" width=""500""/>
9
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/UOsk9_zcbHpCCy6kmryYM.png"" width=""530""/>
10
+ </div>
11
+
12
+ # JetMoE: Reaching LLaMA2 Performance with 0.1M Dollars
13
+
14
+ ## Key Messages
15
+
16
+ 1. JetMoE-8B is **trained with less than $ 0.1 million**<sup>1</sup> **cost but outperforms LLaMA2-7B from Meta AI**, who has multi-billion-dollar training resources. LLM training can be **much cheaper than people previously thought**.
17
+
18
+ 2. JetMoE-8B is **fully open-sourced and academia-friendly** because:
19
+ - It **only uses public datasets** for training, and the code is open-sourced. No proprietary resource is needed.
20
+ - It **can be finetuned with very limited compute budget** (e.g., consumer-grade GPU) that most labs can afford.
21
+
22
+ 3. JetMoE-8B **only has 2.2B active parameters** during inference, which drastically lowers the computational cost. Compared to a model with similar inference computation, like Gemma-2B, JetMoE-8B achieves constantly better performance.
23
+
24
+ <sup>1</sup> We used a 96×H100 GPU cluster for 2 weeks, which cost ~$0.08 million.
25
+
26
+ Website: [https://research.myshell.ai/jetmoe](https://research.myshell.ai/jetmoe)
27
+
28
+ HuggingFace: [https://huggingface.co/jetmoe/jetmoe-8b](https://huggingface.co/jetmoe/jetmoe-8b)
29
+
30
+ Online Demo on Lepton AI: [https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat](https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat)
31
+
32
+ Technical Report: [https://arxiv.org/pdf/2404.07413.pdf](https://arxiv.org/pdf/2404.07413.pdf)
33
+
34
+ ## Authors
35
+
36
+ The project is contributed by [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ), [Zhen Guo](https://zguo0525.github.io/), [Tianle Cai](https://www.tianle.website/#/) and [Zengyi Qin](https://www.qinzy.tech/). For technical inquiries, please contact [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ). For media and collaboration inquiries, please contact [Zengyi Qin](https://www.qinzy.tech/).
37
+
38
+ ## Collaboration
39
+ **If you have great ideas but need more resources (GPU, data, funding, etc.)**, welcome to contact **MyShell.ai** via [Zengyi Qin](https://www.qinzy.tech/). **MyShell.ai** is open to collaborations and are actively supporting high-quality open-source projects.
40
+
41
+ ## Benchmarks
42
+ We use the same evaluation methodology as in the Open LLM leaderboard. For MBPP code benchmark, we use the same evaluation methodology as in the LLaMA2 and Deepseek-MoE paper. The results are shown below:
43
+
44
+ |Model|Activate Params|Training Tokens|Open LLM Leaderboard Avg|ARC|Hellaswag|MMLU|TruthfulQA|WinoGrande|GSM8k|MBPP|HumanEval|
45
+ |---|---|---|---|---|---|---|---|---|---|---|---|
46
+ |Shot||||25|10|5|0|5|5|3|0|
47
+ |Metric||||acc_norm|acc_norm|acc|mc2|acc|acc|Pass@1|Pass@1|
48
+ |LLaMA2-7B|7B|2T|51.0|53.1|78.6|46.9|38.8|74|14.5|20.8|12.8|
49
+ |LLaMA-13B|13B|1T|51.4|**56.2**|**80.9**|47.7|39.5|**76.2**|7.6|22.0|15.8|
50
+ |DeepseekMoE-16B|2.8B|2T|51.1|53.2|79.8|46.3|36.1|73.7|17.3|34.0|**25.0**|
51
+ |Gemma-2B|2B|2T|46.4|48.4|71.8|41.8|33.1|66.3|16.9|28.0|24.4|
52
+ |JetMoE-8B|2.2B|1.25T|**53.0**|48.7|80.5|**49.2**|**41.7**|70.2|**27.8**|**34.2**|14.6|
53
+
54
+ | Model | MT-Bench Score |
55
+ |---------------------|-----------|
56
+ | GPT-4 | 9.014 |
57
+ | GPT-3.5-turbo | 7.995 |
58
+ | Claude-v1 | 7.923 |
59
+ | **JetMoE-8B-chat** | **6.681** |
60
+ | Llama-2-13b-chat | 6.650 |
61
+ | Vicuna-13b-v1.3 | 6.413 |
62
+ | Wizardlm-13b | 6.353 |
63
+ | Llama-2-7b-chat | 6.269 |
64
+
65
+ To our surprise, despite the lower training cost and computation, JetMoE-8B performs even better than LLaMA2-7B, LLaMA-13B, and DeepseekMoE-16B. Compared to a model with similar training and inference computation, like Gemma-2B, JetMoE-8B achieves better performance.
66
+
67
+ ## Model Usage
68
+ To load the models, you need install [this package](https://github.com/myshell-ai/JetMoE):
69
+ ```
70
+ pip install -e .
71
+ ```
72
+
73
+ Then you can load the model with the following code:
74
+ ```python
75
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, AutoModelForSequenceClassification
76
+ from jetmoe import JetMoEForCausalLM, JetMoEConfig, JetMoEForSequenceClassification
77
+
78
+ AutoConfig.register(""jetmoe"", JetMoEConfig)
79
+ AutoModelForCausalLM.register(JetMoEConfig, JetMoEForCausalLM)
80
+ AutoModelForSequenceClassification.register(JetMoEConfig, JetMoEForSequenceClassification)
81
+
82
+ tokenizer = AutoTokenizer.from_pretrained('jetmoe/jetmoe-8b')
83
+ model = AutoModelForCausalLM.from_pretrained('jetmoe/jetmoe-8b')
84
+ ```
85
+
86
+ ## Model Details
87
+ JetMoE-8B has 24 blocks.
88
+ Each block has two MoE layers: Mixture of Attention heads (MoA) and Mixture of MLP Experts (MoE).
89
+ Each MoA and MoE layer has 8 expert, and 2 experts are activated for each input token.
90
+ It has 8 billion parameters in total and 2.2B active parameters.
91
+ JetMoE-8B is trained on 1.25T tokens from publicly available datasets, with a learning rate of 5.0 x 10<sup>-4</sup> and a global batch-size of 4M tokens.
92
+
93
+ <figure>
94
+ <center>
95
+ <img src=""images/jetmoe_architecture.png"" width=""40%"">
96
+ <figcaption>JetMoE Architecture</figcaption>
97
+ </center>
98
+ </figure>
99
+
100
+ ## Training Details
101
+ Our training recipe follows the [MiniCPM](https://shengdinghu.notion.site/MiniCPM-Unveiling-the-Potential-of-End-side-Large-Language-Models-d4d3a8c426424654a4e80e42a711cb20?pvs=4)'s two-phases training method. Phase 1 uses a constant learning rate with linear warmup and is trained on 1 trillion tokens from large-scale open-source pretraining datasets, including RefinedWeb, Pile, Github data, etc. Phase 2 uses exponential learning rate decay and is trained on 250 billion tokens from phase 1 datasets and extra high-quality open-source datasets.
102
+
103
+ <figure>
104
+ <center>
105
+ <img src=""images/Phase1_data.png"" width=""60%"">
106
+ <img src=""images/Phase2_data.png"" width=""60%"">
107
+ </center>
108
+ </figure>
109
+
110
+ ## Technical Report
111
+ For more details, please refer to the [JetMoE Technical Report](https://arxiv.org/abs/2404.07413).
112
+
113
+ ## JetMoE Model Index
114
+ |Model|Index|
115
+ |---|---|
116
+ |JetMoE-8B-Base| [Link](https://huggingface.co/jetmoe/jetmoe-8B) |
117
+ |JetMoE-8B-SFT| [Link](https://huggingface.co/jetmoe/jetmoe-8B-sft) |
118
+ |JetMoE-8B-Chat| [Link](https://huggingface.co/jetmoe/jetmoe-8B-chat) |
119
+
120
+ ## Acknowledgement
121
+ We express our gratitude to [Shengding Hu](https://shengdinghu.github.io/) for his valuable advice on the Phase 2 data mixture. We also express our gratitude to [Exabits](https://www.exabits.ai/) for their assistance in setting up the GPU clusters, and to [Lepton AI](https://www.lepton.ai/) for their support in setting up the chat demo.
122
+
123
+ ","{""id"": ""jetmoe/jetmoe-8b"", ""author"": ""jetmoe"", ""sha"": ""d8fd02ccf7911aa8148a63c7984ffd2e465b0352"", ""last_modified"": ""2024-04-15 03:23:40+00:00"", ""created_at"": ""2024-03-25 16:47:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1416, ""downloads_all_time"": null, ""likes"": 246, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""jetmoe"", ""text-generation"", ""arxiv:2404.07413"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""jetmoe"", ""architectures"": [""JetMoEForCausalLM""], ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase1_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase2_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/jetmoe_architecture.png', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8522237952}, ""total"": 8522237952}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-15 03:23:40+00:00"", ""cardData"": ""license: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6601aa851d7ae71f0ee68c57"", ""modelId"": ""jetmoe/jetmoe-8b"", ""usedStorage"": 34089512635}",0,"https://huggingface.co/jetmoe/jetmoe-8b-chat, https://huggingface.co/jetmoe/jetmoe-8b-sft",2,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=jetmoe/jetmoe-8b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjetmoe%2Fjetmoe-8b%5D(%2Fjetmoe%2Fjetmoe-8b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
124
+ jetmoe/jetmoe-8b-chat,"---
125
+ license: apache-2.0
126
+ base_model: jetmoe/jetmoe-8b
127
+ tags:
128
+ - alignment-handbook
129
+ - generated_from_trainer
130
+ datasets:
131
+ - HuggingFaceH4/ultrachat_200k
132
+ - HuggingFaceH4/airoboros-3.2
133
+ - HuggingFaceH4/Code-Feedback
134
+ - HuggingFaceH4/orca-math-word-problems-200k
135
+ - HuggingFaceH4/SystemChat
136
+ - HuggingFaceH4/capybara
137
+ model-index:
138
+ - name: jetmoe-8b-sft
139
+ results: []
140
+ ---
141
+
142
+ <div align=""center"">
143
+ <div>&nbsp;</div>
144
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/ieHnwuczidNNoGRA_FN2y.png"" width=""500""/>
145
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/UOsk9_zcbHpCCy6kmryYM.png"" width=""530""/>
146
+ </div>
147
+
148
+ # JetMoE: Reaching LLaMA2 Performance with 0.1M Dollars
149
+
150
+ ## Key Messages
151
+
152
+ 1. JetMoE-8B is **trained with less than $ 0.1 million**<sup>1</sup> **cost but outperforms LLaMA2-7B from Meta AI**, who has multi-billion-dollar training resources. LLM training can be **much cheaper than people previously thought**.
153
+
154
+ 2. JetMoE-8B is **fully open-sourced and academia-friendly** because:
155
+ - It **only uses public datasets** for training, and the code is open-sourced. No proprietary resource is needed.
156
+ - It **can be finetuned with very limited compute budget** (e.g., consumer-grade GPU) that most labs can afford.
157
+
158
+ 3. JetMoE-8B **only has 2.2B active parameters** during inference, which drastically lowers the computational cost. Compared to a model with similar inference computation, like Gemma-2B, JetMoE-8B achieves constantly better performance.
159
+
160
+ <sup>1</sup> We used a 96×H100 GPU cluster for 2 weeks, which cost ~$0.08 million.
161
+
162
+ Website: [https://research.myshell.ai/jetmoe](https://research.myshell.ai/jetmoe)
163
+
164
+ HuggingFace: [https://huggingface.co/jetmoe/jetmoe-8b](https://huggingface.co/jetmoe/jetmoe-8b)
165
+
166
+ Online Demo on Lepton AI: [https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat](https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat)
167
+
168
+ Technical Report: [https://arxiv.org/pdf/2404.07413.pdf](https://arxiv.org/pdf/2404.07413.pdf)
169
+
170
+ ## Authors
171
+
172
+ The project is contributed by [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ), [Zhen Guo](https://zguo0525.github.io/), [Tianle Cai](https://www.tianle.website/#/) and [Zengyi Qin](https://www.qinzy.tech/). For technical inquiries, please contact [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ). For media and collaboration inquiries, please contact [Zengyi Qin](https://www.qinzy.tech/).
173
+
174
+ ## Collaboration
175
+ **If you have great ideas but need more resources (GPU, data, funding, etc.)**, welcome to contact **MyShell.ai** via [Zengyi Qin](https://www.qinzy.tech/). **MyShell.ai** is open to collaborations and are actively supporting high-quality open-source projects.
176
+
177
+ ## Benchmarks
178
+ We use the same evaluation methodology as in the Open LLM leaderboard. For MBPP code benchmark, we use the same evaluation methodology as in the LLaMA2 and Deepseek-MoE paper. The results are shown below:
179
+
180
+ |Model|Activate Params|Training Tokens|Open LLM Leaderboard Avg|ARC|Hellaswag|MMLU|TruthfulQA|WinoGrande|GSM8k|MBPP|HumanEval|
181
+ |---|---|---|---|---|---|---|---|---|---|---|---|
182
+ |Shot||||25|10|5|0|5|5|3|0|
183
+ |Metric||||acc_norm|acc_norm|acc|mc2|acc|acc|Pass@1|Pass@1|
184
+ |LLaMA2-7B|7B|2T|51.0|53.1|78.6|46.9|38.8|74|14.5|20.8|12.8|
185
+ |LLaMA-13B|13B|1T|51.4|**56.2**|**80.9**|47.7|39.5|**76.2**|7.6|22.0|15.8|
186
+ |DeepseekMoE-16B|2.8B|2T|51.1|53.2|79.8|46.3|36.1|73.7|17.3|34.0|**25.0**|
187
+ |Gemma-2B|2B|2T|46.4|48.4|71.8|41.8|33.1|66.3|16.9|28.0|24.4|
188
+ |JetMoE-8B|2.2B|1.25T|**53.0**|48.7|80.5|**49.2**|**41.7**|70.2|**27.8**|**34.2**|14.6|
189
+
190
+ | Model | MT-Bench Score |
191
+ |---------------------|-----------|
192
+ | GPT-4 | 9.014 |
193
+ | GPT-3.5-turbo | 7.995 |
194
+ | Claude-v1 | 7.923 |
195
+ | **JetMoE-8B-chat** | **6.681** |
196
+ | Llama-2-13b-chat | 6.650 |
197
+ | Vicuna-13b-v1.3 | 6.413 |
198
+ | Wizardlm-13b | 6.353 |
199
+ | Llama-2-7b-chat | 6.269 |
200
+
201
+
202
+
203
+ To our surprise, despite the lower training cost and computation, JetMoE-8B performs even better than LLaMA2-7B, LLaMA-13B, and DeepseekMoE-16B. Compared to a model with similar training and inference computation, like Gemma-2B, JetMoE-8B achieves better performance.
204
+
205
+ ## Model Usage
206
+
207
+ Here's a quick example to get you started with JetMoE-8B-chat:
208
+
209
+ ```python
210
+ import torch
211
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
212
+ # Initialize the model and tokenizer
213
+ model_name = ""jetmoe/jetmoe-8b-chat""
214
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
215
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, attn_implementation=""eager"", trust_remote_code=True)
216
+ # Check if a GPU is available and move the model to GPU if it is
217
+ if torch.cuda.is_available():
218
+ model = model.cuda()
219
+ print(""Using GPU:"", torch.cuda.get_device_name(torch.cuda.current_device()))
220
+ else:
221
+ print(""GPU is not available, using CPU instead."")
222
+ # Encode input context
223
+ messages = [
224
+ {
225
+ ""role"": ""system"",
226
+ ""content"": ""You are a friendly chatbot"",
227
+ },
228
+ {""role"": ""user"", ""content"": ""How many helicopters can a human eat in one sitting?""},
229
+ ]
230
+ tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors=""pt"")
231
+ print(tokenized_chat)
232
+ # If using a GPU, move the input IDs to the GPU
233
+ if torch.cuda.is_available():
234
+ input_ids = tokenized_chat.cuda()
235
+ # Generate text
236
+ output = model.generate(input_ids, max_length=500, num_return_sequences=1, no_repeat_ngram_size=2)
237
+ # If the output is on the GPU, move it back to CPU for decoding
238
+ if torch.cuda.is_available():
239
+ output = output.cpu()
240
+ # Decode the generated text
241
+ generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
242
+ print(generated_text)
243
+ ```
244
+
245
+ ## Model Details
246
+ JetMoE-8B has 24 blocks.
247
+ Each block has two MoE layers: Mixture of Attention heads (MoA) and Mixture of MLP Experts (MoE).
248
+ Each MoA and MoE layer has 8 expert, and 2 experts are activated for each input token.
249
+ It has 8 billion parameters in total and 2.2B active parameters.
250
+ JetMoE-8B is trained on 1.25T tokens from publicly available datasets, with a learning rate of 5.0 x 10<sup>-4</sup> and a global batch-size of 4M tokens.
251
+
252
+ <figure>
253
+ <center>
254
+ <img src=""images/jetmoe_architecture.png"" width=""40%"">
255
+ <figcaption>JetMoE Architecture</figcaption>
256
+ </center>
257
+ </figure>
258
+
259
+ ## Training Details
260
+ Our training recipe follows the [MiniCPM](https://shengdinghu.notion.site/MiniCPM-Unveiling-the-Potential-of-End-side-Large-Language-Models-d4d3a8c426424654a4e80e42a711cb20?pvs=4)'s two-phases training method. Phase 1 uses a constant learning rate with linear warmup and is trained on 1 trillion tokens from large-scale open-source pretraining datasets, including RefinedWeb, Pile, Github data, etc. Phase 2 uses exponential learning rate decay and is trained on 250 billion tokens from phase 1 datasets and extra high-quality open-source datasets.
261
+
262
+ <figure>
263
+ <center>
264
+ <img src=""images/Phase1_data.png"" width=""60%"">
265
+ <img src=""images/Phase2_data.png"" width=""60%"">
266
+ </center>
267
+ </figure>
268
+
269
+ ## Technical Report
270
+ For more details, please refer to the [JetMoE Technical Report](https://arxiv.org/abs/2404.07413).
271
+
272
+ ## JetMoE Model Index
273
+ |Model|Index|
274
+ |---|---|
275
+ |JetMoE-8B-Base| [Link](https://huggingface.co/jetmoe/jetmoe-8B) |
276
+ |JetMoE-8B-SFT| [Link](https://huggingface.co/jetmoe/jetmoe-8B-sft) |
277
+ |JetMoE-8B-Chat| [Link](https://huggingface.co/jetmoe/jetmoe-8B-chat) |
278
+
279
+ ## Acknowledgement
280
+ We express our gratitude to [Shengding Hu](https://shengdinghu.github.io/) for his valuable advice on the Phase 2 data mixture. We also express our gratitude to [Exabits](https://www.exabits.ai/) for their assistance in setting up the GPU clusters, and to [Lepton AI](https://www.lepton.ai/) for their support in setting up the chat demo.","{""id"": ""jetmoe/jetmoe-8b-chat"", ""author"": ""jetmoe"", ""sha"": ""962cfc471ee571ef550f683af8b32b3cac5d0024"", ""last_modified"": ""2024-05-11 12:24:25+00:00"", ""created_at"": ""2024-03-31 00:00:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 26, ""downloads_all_time"": null, ""likes"": 29, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""jetmoe"", ""text-generation"", ""alignment-handbook"", ""generated_from_trainer"", ""conversational"", ""custom_code"", ""dataset:HuggingFaceH4/ultrachat_200k"", ""dataset:HuggingFaceH4/airoboros-3.2"", ""dataset:HuggingFaceH4/Code-Feedback"", ""dataset:HuggingFaceH4/orca-math-word-problems-200k"", ""dataset:HuggingFaceH4/SystemChat"", ""dataset:HuggingFaceH4/capybara"", ""arxiv:2404.07413"", ""base_model:jetmoe/jetmoe-8b"", ""base_model:finetune:jetmoe/jetmoe-8b"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: jetmoe/jetmoe-8b\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- HuggingFaceH4/airoboros-3.2\n- HuggingFaceH4/Code-Feedback\n- HuggingFaceH4/orca-math-word-problems-200k\n- HuggingFaceH4/SystemChat\n- HuggingFaceH4/capybara\nlicense: apache-2.0\ntags:\n- alignment-handbook\n- generated_from_trainer\nmodel-index:\n- name: jetmoe-8b-sft\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the 
universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""jetmoe-8b-sft"", ""results"": []}], ""config"": {""architectures"": [""JetMoEForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_jetmoe.JetMoEConfig"", ""AutoModelForCausalLM"": ""modeling_jetmoe.JetMoEForCausalLM""}, ""model_type"": ""jetmoe"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_jetmoe.JetMoEForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_jetmoe.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase1_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase2_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/jetmoe_architecture.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_jetmoe.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8522237952}, ""total"": 8522237952}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-11 12:24:25+00:00"", ""cardData"": ""base_model: jetmoe/jetmoe-8b\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- HuggingFaceH4/airoboros-3.2\n- HuggingFaceH4/Code-Feedback\n- HuggingFaceH4/orca-math-word-problems-200k\n- HuggingFaceH4/SystemChat\n- HuggingFaceH4/capybara\nlicense: apache-2.0\ntags:\n- alignment-handbook\n- generated_from_trainer\nmodel-index:\n- name: jetmoe-8b-sft\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_jetmoe.JetMoEForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6608a7b550d2b7a710ddd99d"", ""modelId"": ""jetmoe/jetmoe-8b-chat"", ""usedStorage"": 
17045001643}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=jetmoe/jetmoe-8b-chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjetmoe%2Fjetmoe-8b-chat%5D(%2Fjetmoe%2Fjetmoe-8b-chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
281
+ jetmoe/jetmoe-8b-sft,"---
282
+ license: apache-2.0
283
+ base_model: jetmoe/jetmoe-8b
284
+ tags:
285
+ - alignment-handbook
286
+ - generated_from_trainer
287
+ datasets:
288
+ - HuggingFaceH4/ultrachat_200k
289
+ - HuggingFaceH4/airoboros-3.2
290
+ - HuggingFaceH4/Code-Feedback
291
+ - HuggingFaceH4/orca-math-word-problems-200k
292
+ - HuggingFaceH4/SystemChat
293
+ - HuggingFaceH4/capybara
294
+ model-index:
295
+ - name: jetmoe-8b-sft
296
+ results: []
297
+ ---
298
+
299
+ <div align=""center"">
300
+ <div>&nbsp;</div>
301
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/ieHnwuczidNNoGRA_FN2y.png"" width=""500""/>
302
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/UOsk9_zcbHpCCy6kmryYM.png"" width=""530""/>
303
+ </div>
304
+
305
+ # JetMoE: Reaching LLaMA2 Performance with 0.1M Dollars
306
+
307
+ ## Key Messages
308
+
309
+ 1. JetMoE-8B is **trained with less than $ 0.1 million**<sup>1</sup> **cost but outperforms LLaMA2-7B from Meta AI**, who has multi-billion-dollar training resources. LLM training can be **much cheaper than people previously thought**.
310
+
311
+ 2. JetMoE-8B is **fully open-sourced and academia-friendly** because:
312
+ - It **only uses public datasets** for training, and the code is open-sourced. No proprietary resource is needed.
313
+ - It **can be finetuned with very limited compute budget** (e.g., consumer-grade GPU) that most labs can afford.
314
+
315
+ 3. JetMoE-8B **only has 2.2B active parameters** during inference, which drastically lowers the computational cost. Compared to a model with similar inference computation, like Gemma-2B, JetMoE-8B achieves constantly better performance.
316
+
317
+ <sup>1</sup> We used a 96×H100 GPU cluster for 2 weeks, which cost ~$0.08 million.
318
+
319
+ Website: [https://research.myshell.ai/jetmoe](https://research.myshell.ai/jetmoe)
320
+
321
+ HuggingFace: [https://huggingface.co/jetmoe/jetmoe-8b](https://huggingface.co/jetmoe/jetmoe-8b)
322
+
323
+ Online Demo on Lepton AI: [https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat](https://www.lepton.ai/playground/chat?model=jetmoe-8b-chat)
324
+
325
+ Technical Report: [https://arxiv.org/pdf/2404.07413.pdf](https://arxiv.org/pdf/2404.07413.pdf)
326
+
327
+ ## Authors
328
+
329
+ The project is contributed by [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ), [Zhen Guo](https://zguo0525.github.io/), [Tianle Cai](https://www.tianle.website/#/) and [Zengyi Qin](https://www.qinzy.tech/). For technical inquiries, please contact [Yikang Shen](https://scholar.google.com.hk/citations?user=qff5rRYAAAAJ). For media and collaboration inquiries, please contact [Zengyi Qin](https://www.qinzy.tech/).
330
+
331
+ ## Collaboration
332
+ **If you have great ideas but need more resources (GPU, data, funding, etc.)**, welcome to contact **MyShell.ai** via [Zengyi Qin](https://www.qinzy.tech/). **MyShell.ai** is open to collaborations and are actively supporting high-quality open-source projects.
333
+
334
+ ## Benchmarks
335
+ We use the same evaluation methodology as in the Open LLM leaderboard. For MBPP code benchmark, we use the same evaluation methodology as in the LLaMA2 and Deepseek-MoE paper. The results are shown below:
336
+
337
+ |Model|Activate Params|Training Tokens|Open LLM Leaderboard Avg|ARC|Hellaswag|MMLU|TruthfulQA|WinoGrande|GSM8k|MBPP|HumanEval|
338
+ |---|---|---|---|---|---|---|---|---|---|---|---|
339
+ |Shot||||25|10|5|0|5|5|3|0|
340
+ |Metric||||acc_norm|acc_norm|acc|mc2|acc|acc|Pass@1|Pass@1|
341
+ |LLaMA2-7B|7B|2T|51.0|53.1|78.6|46.9|38.8|74|14.5|20.8|12.8|
342
+ |LLaMA-13B|13B|1T|51.4|**56.2**|**80.9**|47.7|39.5|**76.2**|7.6|22.0|15.8|
343
+ |DeepseekMoE-16B|2.8B|2T|51.1|53.2|79.8|46.3|36.1|73.7|17.3|34.0|**25.0**|
344
+ |Gemma-2B|2B|2T|46.4|48.4|71.8|41.8|33.1|66.3|16.9|28.0|24.4|
345
+ |JetMoE-8B|2.2B|1.25T|**53.0**|48.7|80.5|**49.2**|**41.7**|70.2|**27.8**|**34.2**|14.6|
346
+
347
+ | Model | MT-Bench Score |
348
+ |---------------------|-----------|
349
+ | GPT-4 | 9.014 |
350
+ | GPT-3.5-turbo | 7.995 |
351
+ | Claude-v1 | 7.923 |
352
+ | **JetMoE-8B-chat** | **6.681** |
353
+ | Llama-2-13b-chat | 6.650 |
354
+ | Vicuna-13b-v1.3 | 6.413 |
355
+ | Wizardlm-13b | 6.353 |
356
+ | Llama-2-7b-chat | 6.269 |
357
+
358
+ To our surprise, despite the lower training cost and computation, JetMoE-8B performs even better than LLaMA2-7B, LLaMA-13B, and DeepseekMoE-16B. Compared to a model with similar training and inference computation, like Gemma-2B, JetMoE-8B achieves better performance.
359
+
360
+ ## Model Usage
361
+ To load the models, you need install [this package](https://github.com/myshell-ai/JetMoE):
362
+ ```
363
+ pip install -e .
364
+ ```
365
+
366
+ Then you can load the model with the following code:
367
+ ```python
368
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, AutoModelForSequenceClassification
369
+ from jetmoe import JetMoEForCausalLM, JetMoEConfig, JetMoEForSequenceClassification
370
+
371
+ AutoConfig.register(""jetmoe"", JetMoEConfig)
372
+ AutoModelForCausalLM.register(JetMoEConfig, JetMoEForCausalLM)
373
+ AutoModelForSequenceClassification.register(JetMoEConfig, JetMoEForSequenceClassification)
374
+
375
+ tokenizer = AutoTokenizer.from_pretrained('jetmoe/jetmoe-8b')
376
+ model = AutoModelForCausalLM.from_pretrained('jetmoe/jetmoe-8b')
377
+ ```
378
+
379
+ ## Model Details
380
+ JetMoE-8B has 24 blocks.
381
+ Each block has two MoE layers: Mixture of Attention heads (MoA) and Mixture of MLP Experts (MoE).
382
+ Each MoA and MoE layer has 8 expert, and 2 experts are activated for each input token.
383
+ It has 8 billion parameters in total and 2.2B active parameters.
384
+ JetMoE-8B is trained on 1.25T tokens from publicly available datasets, with a learning rate of 5.0 x 10<sup>-4</sup> and a global batch-size of 4M tokens.
385
+
386
+ <figure>
387
+ <center>
388
+ <img src=""images/jetmoe_architecture.png"" width=""40%"">
389
+ <figcaption>JetMoE Architecture</figcaption>
390
+ </center>
391
+ </figure>
392
+
393
+ ## Training Details
394
+ Our training recipe follows the [MiniCPM](https://shengdinghu.notion.site/MiniCPM-Unveiling-the-Potential-of-End-side-Large-Language-Models-d4d3a8c426424654a4e80e42a711cb20?pvs=4)'s two-phases training method. Phase 1 uses a constant learning rate with linear warmup and is trained on 1 trillion tokens from large-scale open-source pretraining datasets, including RefinedWeb, Pile, Github data, etc. Phase 2 uses exponential learning rate decay and is trained on 250 billion tokens from phase 1 datasets and extra high-quality open-source datasets.
395
+
396
+ <figure>
397
+ <center>
398
+ <img src=""images/Phase1_data.png"" width=""60%"">
399
+ <img src=""images/Phase2_data.png"" width=""60%"">
400
+ </center>
401
+ </figure>
402
+
403
+ ## Technical Report
404
+ For more details, please refer to the [JetMoE Technical Report](https://arxiv.org/abs/2404.07413).
405
+
406
+ ## JetMoE Model Index
407
+ |Model|Index|
408
+ |---|---|
409
+ |JetMoE-8B-Base| [Link](https://huggingface.co/jetmoe/jetmoe-8B) |
410
+ |JetMoE-8B-SFT| [Link](https://huggingface.co/jetmoe/jetmoe-8B-sft) |
411
+ |JetMoE-8B-Chat| [Link](https://huggingface.co/jetmoe/jetmoe-8B-chat) |
412
+
413
+ ## Acknowledgement
414
+ We express our gratitude to [Shengding Hu](https://shengdinghu.github.io/) for his valuable advice on the Phase 2 data mixture. We also express our gratitude to [Exabits](https://www.exabits.ai/) for their assistance in setting up the GPU clusters, and to [Lepton AI](https://www.lepton.ai/) for their support in setting up the chat demo.
415
+
416
+ ","{""id"": ""jetmoe/jetmoe-8b-sft"", ""author"": ""jetmoe"", ""sha"": ""9715d14abb732b0cc492ed491093b9be200c48e3"", ""last_modified"": ""2024-04-15 03:24:08+00:00"", ""created_at"": ""2024-03-30 23:49:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13, ""downloads_all_time"": null, ""likes"": 6, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""jetmoe"", ""text-generation"", ""alignment-handbook"", ""generated_from_trainer"", ""conversational"", ""dataset:HuggingFaceH4/ultrachat_200k"", ""dataset:HuggingFaceH4/airoboros-3.2"", ""dataset:HuggingFaceH4/Code-Feedback"", ""dataset:HuggingFaceH4/orca-math-word-problems-200k"", ""dataset:HuggingFaceH4/SystemChat"", ""dataset:HuggingFaceH4/capybara"", ""arxiv:2404.07413"", ""base_model:jetmoe/jetmoe-8b"", ""base_model:finetune:jetmoe/jetmoe-8b"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: jetmoe/jetmoe-8b\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- HuggingFaceH4/airoboros-3.2\n- HuggingFaceH4/Code-Feedback\n- HuggingFaceH4/orca-math-word-problems-200k\n- HuggingFaceH4/SystemChat\n- HuggingFaceH4/capybara\nlicense: apache-2.0\ntags:\n- alignment-handbook\n- generated_from_trainer\nmodel-index:\n- name: jetmoe-8b-sft\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""jetmoe-8b-sft"", ""results"": []}], ""config"": {""model_type"": ""jetmoe"", ""architectures"": [""JetMoEForCausalLM""], ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}\n{% if 
message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase1_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Phase2_data.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/jetmoe_architecture.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8522237952}, ""total"": 8522237952}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-15 03:24:08+00:00"", ""cardData"": ""base_model: jetmoe/jetmoe-8b\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- HuggingFaceH4/airoboros-3.2\n- HuggingFaceH4/Code-Feedback\n- HuggingFaceH4/orca-math-word-problems-200k\n- HuggingFaceH4/SystemChat\n- HuggingFaceH4/capybara\nlicense: apache-2.0\ntags:\n- alignment-handbook\n- generated_from_trainer\nmodel-index:\n- name: jetmoe-8b-sft\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6608a4f16f0efa80d5280cdb"", ""modelId"": ""jetmoe/jetmoe-8b-sft"", ""usedStorage"": 17045001643}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=jetmoe/jetmoe-8b-sft&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjetmoe%2Fjetmoe-8b-sft%5D(%2Fjetmoe%2Fjetmoe-8b-sft)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
jina-embeddings-v2-base-en_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff
 
mT5_multilingual_XLSum_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
The diff for this file is too large to render. See raw diff