Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- AuraSR_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +38 -0
- BELLE-7B-2M_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +2 -0
- BiRefNet_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +228 -0
- ControlNet_finetunes_20250424_143024.csv_finetunes_20250424_143024.csv +90 -0
- DeepCoder-14B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +551 -0
- DeepSeek-R1-Distill-Qwen-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- DeepSeek-V3-0324_finetunes_20250424_145241.csv_finetunes_20250424_145241.csv +0 -0
- Fimbulvetr-11B-v2_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +702 -0
- Florence-2-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +739 -0
- Ghibli-Diffusion_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +99 -0
- Guanaco_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +108 -0
- HunyuanVideo-gguf_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +25 -0
- LaBSE_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- Llama-2-7B-Chat-GGML_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +372 -0
- Llama2-Chinese-7b-Chat_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +42 -0
- Llasa-3B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +240 -0
- MARS5-TTS_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +170 -0
- MEETING_SUMMARY_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +0 -0
- MN-12B-Mag-Mell-R1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +224 -0
- MaskGCT_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +301 -0
- Meta-Llama-3-8B_finetunes_20250422_180448.csv +0 -0
- Midjourney_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +325 -0
- MiniCPM-o-2_6_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +1565 -0
- Nemotron-Mini-4B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +131 -0
- NeverEnding-Dream_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +36 -0
- Nitro-Diffusion_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +83 -0
- OpenVoiceV2_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +195 -0
- Phi-3-small-8k-instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
- Qwen-14B-Chat_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +792 -0
- Qwen2-Audio-7B-Instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +576 -0
- Real-ESRGAN_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +41 -0
- Ruyi-Mini-7B_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +196 -0
- SmallThinker-3B-Preview_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
- TemporalDiff_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +2 -0
- Tifa-Deepsex-14b-CoT-Q8_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +201 -0
- Yarn-Mistral-7b-128k_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +344 -0
- alpaca-native_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +61 -0
- animatediff_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +5 -0
- anything-midjourney-v-4-1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +17 -0
- bart-large-cnn_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
- bge-large-en_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +0 -0
- bge-reranker-large_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +580 -0
- bge-reranker-v2-m3_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +800 -0
- biomedical-ner-all_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +428 -0
- blenderbot-400M-distill_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- blessed_vae_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +38 -0
- blue_pencil_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- clip-vit-large-patch14_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
- deepseek-vl2-small_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +143 -0
- deepseek-vl2_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +143 -0
AuraSR_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
fal/AuraSR,"---
|
| 3 |
+
license: cc
|
| 4 |
+
tags:
|
| 5 |
+
- art
|
| 6 |
+
- pytorch
|
| 7 |
+
- super-resolution
|
| 8 |
+
---
|
| 9 |
+
# AuraSR
|
| 10 |
+

|
| 11 |
+
|
| 12 |
+
GAN-based Super-Resolution for upscaling generated images, a variation of the [GigaGAN](https://mingukkang.github.io/GigaGAN/) paper for image-conditioned upscaling. Torch implementation is based on the unofficial [lucidrains/gigagan-pytorch](https://github.com/lucidrains/gigagan-pytorch) repository.
|
| 13 |
+
|
| 14 |
+
## Usage
|
| 15 |
+
|
| 16 |
+
```bash
|
| 17 |
+
$ pip install aura-sr
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
```python
|
| 21 |
+
from aura_sr import AuraSR
|
| 22 |
+
|
| 23 |
+
aura_sr = AuraSR.from_pretrained(""fal-ai/AuraSR"")
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
```python
|
| 27 |
+
import requests
|
| 28 |
+
from io import BytesIO
|
| 29 |
+
from PIL import Image
|
| 30 |
+
|
| 31 |
+
def load_image_from_url(url):
|
| 32 |
+
response = requests.get(url)
|
| 33 |
+
image_data = BytesIO(response.content)
|
| 34 |
+
return Image.open(image_data)
|
| 35 |
+
|
| 36 |
+
image = load_image_from_url(""https://mingukkang.github.io/GigaGAN/static/images/iguana_output.jpg"").resize((256, 256))
|
| 37 |
+
upscaled_image = aura_sr.upscale_4x(image)
|
| 38 |
+
```","{""id"": ""fal/AuraSR"", ""author"": ""fal"", ""sha"": ""8b70681ad0364f3221a9bc8c7ef07531df885509"", ""last_modified"": ""2024-07-15 16:44:58+00:00"", ""created_at"": ""2024-06-25 17:22:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 707, ""downloads_all_time"": null, ""likes"": 303, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""art"", ""pytorch"", ""super-resolution"", ""license:cc"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: cc\ntags:\n- art\n- pytorch\n- super-resolution"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""gokaygokay/AuraSR-v2"", ""philipp-zettl/OS-upscaler-AuraSR"", ""rupeshs/fastsdcpu"", ""ZENLLC/AuraUpscale"", ""EPFL-VILAB/FlexTok"", ""tejani/Another"", ""NataLobster/testspace"", ""cocktailpeanut/AuraSR"", ""Sunghokim/diversegpt"", ""ProPerNounpYK/PILAI"", ""Raven7/AuraSR"", ""ProPerNounpYK/PIL"", ""KaiShin1885/AuraSR"", ""cocktailpeanut/AuraSR-v2"", ""Rodneyontherock1067/fastsdcpu"", ""MartsoBodziu1994/AuraSR-v2"", ""YoBatM/FastStableDifussion"", ""svjack/OS-upscaler-AuraSR"", ""tejani/fastsdcpu"", ""tejani/NewApp""], ""safetensors"": {""parameters"": {""F32"": 
617554917}, ""total"": 617554917}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-15 16:44:58+00:00"", ""cardData"": ""license: cc\ntags:\n- art\n- pytorch\n- super-resolution"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""667afcbfe005e1dbcc640477"", ""modelId"": ""fal/AuraSR"", ""usedStorage"": 7410876896}",0,,0,,0,,0,,0,"EPFL-VILAB/FlexTok, KaiShin1885/AuraSR, NataLobster/testspace, ProPerNounpYK/PILAI, Raven7/AuraSR, Sunghokim/diversegpt, ZENLLC/AuraUpscale, cocktailpeanut/AuraSR, gokaygokay/AuraSR-v2, huggingface/InferenceSupport/discussions/new?title=fal/AuraSR&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfal%2FAuraSR%5D(%2Ffal%2FAuraSR)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, philipp-zettl/OS-upscaler-AuraSR, rupeshs/fastsdcpu, tejani/Another",13
|
BELLE-7B-2M_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
BelleGroup/BELLE-7B-2M,N/A,N/A,0,,0,,0,,0,,0,"for1988/BelleGroup-BELLE-7B-2M, gaoshine/BelleGroup-BELLE-7B-2M, huggingface/InferenceSupport/discussions/new?title=BelleGroup/BELLE-7B-2M&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBelleGroup%2FBELLE-7B-2M%5D(%2FBelleGroup%2FBELLE-7B-2M)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, markmagic/BelleGroup-BELLE-7B-2M, zgldh/BelleGroup-BELLE-7B-2M",5
|
BiRefNet_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ZhengPeng7/BiRefNet,"---
|
| 3 |
+
library_name: birefnet
|
| 4 |
+
tags:
|
| 5 |
+
- background-removal
|
| 6 |
+
- mask-generation
|
| 7 |
+
- Dichotomous Image Segmentation
|
| 8 |
+
- Camouflaged Object Detection
|
| 9 |
+
- Salient Object Detection
|
| 10 |
+
- pytorch_model_hub_mixin
|
| 11 |
+
- model_hub_mixin
|
| 12 |
+
- transformers
|
| 13 |
+
- transformers.js
|
| 14 |
+
repo_url: https://github.com/ZhengPeng7/BiRefNet
|
| 15 |
+
pipeline_tag: image-segmentation
|
| 16 |
+
license: mit
|
| 17 |
+
---
|
| 18 |
+
<h1 align=""center"">Bilateral Reference for High-Resolution Dichotomous Image Segmentation</h1>
|
| 19 |
+
|
| 20 |
+
<div align='center'>
|
| 21 |
+
<a href='https://scholar.google.com/citations?user=TZRzWOsAAAAJ' target='_blank'><strong>Peng Zheng</strong></a><sup> 1,4,5,6</sup>, 
|
| 22 |
+
<a href='https://scholar.google.com/citations?user=0uPb8MMAAAAJ' target='_blank'><strong>Dehong Gao</strong></a><sup> 2</sup>, 
|
| 23 |
+
<a href='https://scholar.google.com/citations?user=kakwJ5QAAAAJ' target='_blank'><strong>Deng-Ping Fan</strong></a><sup> 1*</sup>, 
|
| 24 |
+
<a href='https://scholar.google.com/citations?user=9cMQrVsAAAAJ' target='_blank'><strong>Li Liu</strong></a><sup> 3</sup>, 
|
| 25 |
+
<a href='https://scholar.google.com/citations?user=qQP6WXIAAAAJ' target='_blank'><strong>Jorma Laaksonen</strong></a><sup> 4</sup>, 
|
| 26 |
+
<a href='https://scholar.google.com/citations?user=pw_0Z_UAAAAJ' target='_blank'><strong>Wanli Ouyang</strong></a><sup> 5</sup>, 
|
| 27 |
+
<a href='https://scholar.google.com/citations?user=stFCYOAAAAAJ' target='_blank'><strong>Nicu Sebe</strong></a><sup> 6</sup>
|
| 28 |
+
</div>
|
| 29 |
+
|
| 30 |
+
<div align='center'>
|
| 31 |
+
<sup>1 </sup>Nankai University  <sup>2 </sup>Northwestern Polytechnical University  <sup>3 </sup>National University of Defense Technology  <sup>4 </sup>Aalto University  <sup>5 </sup>Shanghai AI Laboratory  <sup>6 </sup>University of Trento 
|
| 32 |
+
</div>
|
| 33 |
+
|
| 34 |
+
<div align=""center"" style=""display: flex; justify-content: center; flex-wrap: wrap;"">
|
| 35 |
+
<a href='https://www.sciopen.com/article/pdf/10.26599/AIR.2024.9150038.pdf'><img src='https://img.shields.io/badge/Journal-Paper-red'></a> 
|
| 36 |
+
<a href='https://arxiv.org/pdf/2401.03407'><img src='https://img.shields.io/badge/arXiv-BiRefNet-red'></a> 
|
| 37 |
+
<a href='https://drive.google.com/file/d/1aBnJ_R9lbnC2dm8dqD0-pzP2Cu-U1Xpt/view?usp=drive_link'><img src='https://img.shields.io/badge/中文版-BiRefNet-red'></a> 
|
| 38 |
+
<a href='https://www.birefnet.top'><img src='https://img.shields.io/badge/Page-BiRefNet-red'></a> 
|
| 39 |
+
<a href='https://drive.google.com/drive/folders/1s2Xe0cjq-2ctnJBR24563yMSCOu4CcxM'><img src='https://img.shields.io/badge/Drive-Stuff-green'></a> 
|
| 40 |
+
<a href='LICENSE'><img src='https://img.shields.io/badge/License-MIT-yellow'></a> 
|
| 41 |
+
<a href='https://huggingface.co/spaces/ZhengPeng7/BiRefNet_demo'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20HF%20Spaces-BiRefNet-blue'></a> 
|
| 42 |
+
<a href='https://huggingface.co/ZhengPeng7/BiRefNet'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20HF%20Models-BiRefNet-blue'></a> 
|
| 43 |
+
<a href='https://colab.research.google.com/drive/14Dqg7oeBkFEtchaHLNpig2BcdkZEogba?usp=drive_link'><img src='https://img.shields.io/badge/Single_Image_Inference-F9AB00?style=for-the-badge&logo=googlecolab&color=525252'></a> 
|
| 44 |
+
<a href='https://colab.research.google.com/drive/1MaEiBfJ4xIaZZn0DqKrhydHB8X97hNXl#scrollTo=DJ4meUYjia6S'><img src='https://img.shields.io/badge/Inference_&_Evaluation-F9AB00?style=for-the-badge&logo=googlecolab&color=525252'></a> 
|
| 45 |
+
</div>
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
| *DIS-Sample_1* | *DIS-Sample_2* |
|
| 49 |
+
| :------------------------------: | :-------------------------------: |
|
| 50 |
+
| <img src=""https://drive.google.com/thumbnail?id=1ItXaA26iYnE8XQ_GgNLy71MOWePoS2-g&sz=w400"" /> | <img src=""https://drive.google.com/thumbnail?id=1Z-esCujQF_uEa_YJjkibc3NUrW4aR_d4&sz=w400"" /> |
|
| 51 |
+
|
| 52 |
+
This repo is the official implementation of ""[**Bilateral Reference for High-Resolution Dichotomous Image Segmentation**](https://arxiv.org/pdf/2401.03407.pdf)"" (___CAAI AIR 2024___).
|
| 53 |
+
|
| 54 |
+
Visit our GitHub repo: [https://github.com/ZhengPeng7/BiRefNet](https://github.com/ZhengPeng7/BiRefNet) for more details -- **codes**, **docs**, and **model zoo**!
|
| 55 |
+
|
| 56 |
+
## How to use
|
| 57 |
+
|
| 58 |
+
### 0. Install Packages:
|
| 59 |
+
```
|
| 60 |
+
pip install -qr https://raw.githubusercontent.com/ZhengPeng7/BiRefNet/main/requirements.txt
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
### 1. Load BiRefNet:
|
| 64 |
+
|
| 65 |
+
#### Use codes + weights from HuggingFace
|
| 66 |
+
> Only use the weights on HuggingFace -- Pro: No need to download BiRefNet codes manually; Con: Codes on HuggingFace might not be latest version (I'll try to keep them always latest).
|
| 67 |
+
|
| 68 |
+
```python
|
| 69 |
+
# Load BiRefNet with weights
|
| 70 |
+
from transformers import AutoModelForImageSegmentation
|
| 71 |
+
birefnet = AutoModelForImageSegmentation.from_pretrained('ZhengPeng7/BiRefNet', trust_remote_code=True)
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
#### Use codes from GitHub + weights from HuggingFace
|
| 75 |
+
> Only use the weights on HuggingFace -- Pro: codes are always latest; Con: Need to clone the BiRefNet repo from my GitHub.
|
| 76 |
+
|
| 77 |
+
```shell
|
| 78 |
+
# Download codes
|
| 79 |
+
git clone https://github.com/ZhengPeng7/BiRefNet.git
|
| 80 |
+
cd BiRefNet
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
```python
|
| 84 |
+
# Use codes locally
|
| 85 |
+
from models.birefnet import BiRefNet
|
| 86 |
+
|
| 87 |
+
# Load weights from Hugging Face Models
|
| 88 |
+
birefnet = BiRefNet.from_pretrained('ZhengPeng7/BiRefNet')
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
#### Use codes from GitHub + weights from local space
|
| 92 |
+
> Only use the weights and codes both locally.
|
| 93 |
+
|
| 94 |
+
```python
|
| 95 |
+
# Use codes and weights locally
|
| 96 |
+
import torch
|
| 97 |
+
from utils import check_state_dict
|
| 98 |
+
|
| 99 |
+
birefnet = BiRefNet(bb_pretrained=False)
|
| 100 |
+
state_dict = torch.load(PATH_TO_WEIGHT, map_location='cpu')
|
| 101 |
+
state_dict = check_state_dict(state_dict)
|
| 102 |
+
birefnet.load_state_dict(state_dict)
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
#### Use the loaded BiRefNet for inference
|
| 106 |
+
```python
|
| 107 |
+
# Imports
|
| 108 |
+
from PIL import Image
|
| 109 |
+
import matplotlib.pyplot as plt
|
| 110 |
+
import torch
|
| 111 |
+
from torchvision import transforms
|
| 112 |
+
from models.birefnet import BiRefNet
|
| 113 |
+
|
| 114 |
+
birefnet = ... # -- BiRefNet should be loaded with codes above, either way.
|
| 115 |
+
torch.set_float32_matmul_precision(['high', 'highest'][0])
|
| 116 |
+
birefnet.to('cuda')
|
| 117 |
+
birefnet.eval()
|
| 118 |
+
birefnet.half()
|
| 119 |
+
|
| 120 |
+
def extract_object(birefnet, imagepath):
|
| 121 |
+
# Data settings
|
| 122 |
+
image_size = (1024, 1024)
|
| 123 |
+
transform_image = transforms.Compose([
|
| 124 |
+
transforms.Resize(image_size),
|
| 125 |
+
transforms.ToTensor(),
|
| 126 |
+
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
| 127 |
+
])
|
| 128 |
+
|
| 129 |
+
image = Image.open(imagepath)
|
| 130 |
+
input_images = transform_image(image).unsqueeze(0).to('cuda').half()
|
| 131 |
+
|
| 132 |
+
# Prediction
|
| 133 |
+
with torch.no_grad():
|
| 134 |
+
preds = birefnet(input_images)[-1].sigmoid().cpu()
|
| 135 |
+
pred = preds[0].squeeze()
|
| 136 |
+
pred_pil = transforms.ToPILImage()(pred)
|
| 137 |
+
mask = pred_pil.resize(image.size)
|
| 138 |
+
image.putalpha(mask)
|
| 139 |
+
return image, mask
|
| 140 |
+
|
| 141 |
+
# Visualization
|
| 142 |
+
plt.axis(""off"")
|
| 143 |
+
plt.imshow(extract_object(birefnet, imagepath='PATH-TO-YOUR_IMAGE.jpg')[0])
|
| 144 |
+
plt.show()
|
| 145 |
+
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
### 2. Use inference endpoint locally:
|
| 149 |
+
> You may need to click the *deploy* and set up the endpoint by yourself, which would make some costs.
|
| 150 |
+
```
|
| 151 |
+
import requests
|
| 152 |
+
import base64
|
| 153 |
+
from io import BytesIO
|
| 154 |
+
from PIL import Image
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
YOUR_HF_TOKEN = 'xxx'
|
| 158 |
+
API_URL = ""xxx""
|
| 159 |
+
headers = {
|
| 160 |
+
""Authorization"": ""Bearer {}"".format(YOUR_HF_TOKEN)
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
def base64_to_bytes(base64_string):
|
| 164 |
+
# Remove the data URI prefix if present
|
| 165 |
+
if ""data:image"" in base64_string:
|
| 166 |
+
base64_string = base64_string.split("","")[1]
|
| 167 |
+
|
| 168 |
+
# Decode the Base64 string into bytes
|
| 169 |
+
image_bytes = base64.b64decode(base64_string)
|
| 170 |
+
return image_bytes
|
| 171 |
+
|
| 172 |
+
def bytes_to_base64(image_bytes):
|
| 173 |
+
# Create a BytesIO object to handle the image data
|
| 174 |
+
image_stream = BytesIO(image_bytes)
|
| 175 |
+
|
| 176 |
+
# Open the image using Pillow (PIL)
|
| 177 |
+
image = Image.open(image_stream)
|
| 178 |
+
return image
|
| 179 |
+
|
| 180 |
+
def query(payload):
|
| 181 |
+
response = requests.post(API_URL, headers=headers, json=payload)
|
| 182 |
+
return response.json()
|
| 183 |
+
|
| 184 |
+
output = query({
|
| 185 |
+
""inputs"": ""https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg"",
|
| 186 |
+
""parameters"": {}
|
| 187 |
+
})
|
| 188 |
+
|
| 189 |
+
output_image = bytes_to_base64(base64_to_bytes(output))
|
| 190 |
+
output_image
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
> This BiRefNet for standard dichotomous image segmentation (DIS) is trained on **DIS-TR** and validated on **DIS-TEs and DIS-VD**.
|
| 195 |
+
|
| 196 |
+
## This repo holds the official model weights of ""[<ins>Bilateral Reference for High-Resolution Dichotomous Image Segmentation</ins>](https://arxiv.org/pdf/2401.03407)"" (_CAAI AIR 2024_).
|
| 197 |
+
|
| 198 |
+
This repo contains the weights of BiRefNet proposed in our paper, which has achieved the SOTA performance on three tasks (DIS, HRSOD, and COD).
|
| 199 |
+
|
| 200 |
+
Go to my GitHub page for BiRefNet codes and the latest updates: https://github.com/ZhengPeng7/BiRefNet :)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
#### Try our online demos for inference:
|
| 204 |
+
|
| 205 |
+
+ Online **Image Inference** on Colab: [](https://colab.research.google.com/drive/14Dqg7oeBkFEtchaHLNpig2BcdkZEogba?usp=drive_link)
|
| 206 |
+
+ **Online Inference with GUI on Hugging Face** with adjustable resolutions: [](https://huggingface.co/spaces/ZhengPeng7/BiRefNet_demo)
|
| 207 |
+
+ **Inference and evaluation** of your given weights: [](https://colab.research.google.com/drive/1MaEiBfJ4xIaZZn0DqKrhydHB8X97hNXl#scrollTo=DJ4meUYjia6S)
|
| 208 |
+
<img src=""https://drive.google.com/thumbnail?id=12XmDhKtO1o2fEvBu4OE4ULVB2BK0ecWi&sz=w1080"" />
|
| 209 |
+
|
| 210 |
+
## Acknowledgement:
|
| 211 |
+
|
| 212 |
+
+ Many thanks to @Freepik for their generous support on GPU resources for training higher resolution BiRefNet models and more of my explorations.
|
| 213 |
+
+ Many thanks to @fal for their generous support on GPU resources for training better general BiRefNet models.
|
| 214 |
+
+ Many thanks to @not-lain for his help on the better deployment of our BiRefNet model on HuggingFace.
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
## Citation
|
| 218 |
+
|
| 219 |
+
```
|
| 220 |
+
@article{zheng2024birefnet,
|
| 221 |
+
title={Bilateral Reference for High-Resolution Dichotomous Image Segmentation},
|
| 222 |
+
author={Zheng, Peng and Gao, Dehong and Fan, Deng-Ping and Liu, Li and Laaksonen, Jorma and Ouyang, Wanli and Sebe, Nicu},
|
| 223 |
+
journal={CAAI Artificial Intelligence Research},
|
| 224 |
+
volume = {3},
|
| 225 |
+
pages = {9150038},
|
| 226 |
+
year={2024}
|
| 227 |
+
}
|
| 228 |
+
```","{""id"": ""ZhengPeng7/BiRefNet"", ""author"": ""ZhengPeng7"", ""sha"": ""6a62b7dcfa18a3829087877fb16c8006831e4220"", ""last_modified"": ""2025-03-31 06:29:55+00:00"", ""created_at"": ""2024-07-12 08:50:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 632363, ""downloads_all_time"": null, ""likes"": 362, ""library_name"": ""birefnet"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""birefnet"", ""safetensors"", ""image-segmentation"", ""background-removal"", ""mask-generation"", ""Dichotomous Image Segmentation"", ""Camouflaged Object Detection"", ""Salient Object Detection"", ""pytorch_model_hub_mixin"", ""model_hub_mixin"", ""transformers"", ""transformers.js"", ""custom_code"", ""arxiv:2401.03407"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-segmentation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: birefnet\nlicense: mit\npipeline_tag: image-segmentation\ntags:\n- background-removal\n- mask-generation\n- Dichotomous Image Segmentation\n- Camouflaged Object Detection\n- Salient Object Detection\n- pytorch_model_hub_mixin\n- model_hub_mixin\n- transformers\n- transformers.js\nrepo_url: https://github.com/ZhengPeng7/BiRefNet"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BiRefNet""], ""auto_map"": {""AutoConfig"": ""BiRefNet_config.BiRefNetConfig"", ""AutoModelForImageSegmentation"": ""birefnet.BiRefNet""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageSegmentation"", ""custom_class"": ""birefnet.BiRefNet"", ""pipeline_tag"": ""image-segmentation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='BiRefNet_config.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='birefnet.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='handler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""not-lain/background-removal"", ""VAST-AI/TripoSG"", ""innova-ai/video-background-removal"", ""jasperai/LBM_relighting"", ""InstantX/InstantCharacter"", ""ZhengPeng7/BiRefNet_demo"", ""PramaLLC/BEN2"", ""VAST-AI/MV-Adapter-I2MV-SDXL"", ""VAST-AI/MV-Adapter-Img2Texture"", ""TencentARC/FreeSplatter"", ""not-lain/locally-compatible-BG-removal"", ""Stable-X/StableRecon"", ""NegiTurkey/Multi_Birefnetfor_Background_Removal"", ""ysharma/BiRefNet_for_text_writing"", ""victor/background-removal"", ""Han-123/background-removal"", ""not-lain/gpu-utils"", ""not-lain/video-background-removal"", ""Tassawar/back_ground_removal"", ""cocktailpeanut/video-background-removal"", ""lodhrangpt/Multi_BG_Removal"", ""shemayons/Image-Background-Removal"", ""svjack/video-background-removal"", ""randomtable/BiRefNet_Backgroun_Removal"", ""sumit400/RemoveBG"", ""ginigen/BiRefNet_plus"", ""jHaselberger/cool-avatar"", ""gaur3009/GraphicsAI"", ""danielecordano/background-colouring"", ""ghostsInTheMachine/BiRefNet_demo"", ""chuuhtetnaing/background-remover"", ""ShahbazAlam/background-removal-dub2"", ""walidadebayo/BackgroundRemoverandChanger"", ""John6666/BackgroundRemoverandChanger"", ""NabeelShar/BiRefNet_for_text_writing"", ""Ashoka74/RefurnishAI"", ""Kims12/background-removal"", ""Invictus-Jai/image-segment"", ""sariyam/test1"", ""chryssouille/agent_choupinou"", ""dibahadie/KeychainSegmentation"", ""rphrp1985/zerogpu-2"", ""onebitss/Remover_bg"", ""harshkidzure/BiRefNet_demo"", ""LULDev/background-removal"", ""DrChamyoung/deep_ml_backgroundremoval"", 
""SUHHHH/background100"", ""superrich001/background1000"", ""aliceblue11/background-removal111"", ""SUHHHH/background-removal-test1"", ""superrich001/background1001"", ""superrich001/background1002"", ""superrich001/background1003"", ""yucelgumus61/Image_Background_Remove"", ""kodnuke/arkaplansilici"", ""minn4/background-remover"", ""Eldirectorweb/Prueba"", ""vinayakrevankar/background-removal"", ""Fywzero/HivisionIDPhotos"", ""BananaSauce/background-removal2"", ""manoloye/background-removal"", ""Golfies/fuchsia-filter"", ""q1139168548/HivisionIDPhotos"", ""killuabakura/background-removal"", ""sainkan/video-background-removal"", ""Ron1006/background-removal"", ""SUP3RMASS1VE/background-removal"", ""Nymbo/video-background-removal"", ""mahmudev/video-background-removal"", ""qweret6565/removebg"", ""qweret6565/background-removal"", ""Oomify/video-background-removal"", ""sapbot/VideoBackgroundRemoval-Copy"", ""amirkhanbloch/BG_Removal"", ""InvictusRudra/Camouflaged_Object_detect"", ""sammichang/removebg"", ""digitalmax1/background-removal"", ""joermd/removeback"", ""MrDianosaur/background-removal"", ""kheloo/background-removal"", ""khelonaseer1/background-removal"", ""kheloo/Multi_BG_Removal"", ""Ashoka74/ProductPlacementBG"", ""MLBench/detect-contours"", ""Ashoka74/Refurnish"", ""bep40/360IMAGES"", ""Ashoka74/Demo_Refurnish"", ""NEROTECHRB/clothing-segmentation-detection"", ""victorestrada/quitar-background"", ""raymerjacque/makulu-bg-removal"", ""prassu999p/BiRefNet_demo_Background_Text"", ""kharismagp/Hapus-Background"", ""hans7393/background-removal_3"", ""Kims12/2-4_background-removal_GPU"", ""CSB261/bgr"", ""superrich001/background-removal_250101"", ""karan2050/background-removal"", ""MLBench/Contours_Extraction"", ""MLBench/Drawer_Detection"", ""p9iaai/background-removal""], ""safetensors"": {""parameters"": {""I64"": 497664, ""F32"": 220202578}, ""total"": 220700242}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-31 
06:29:55+00:00"", ""cardData"": ""library_name: birefnet\nlicense: mit\npipeline_tag: image-segmentation\ntags:\n- background-removal\n- mask-generation\n- Dichotomous Image Segmentation\n- Camouflaged Object Detection\n- Salient Object Detection\n- pytorch_model_hub_mixin\n- model_hub_mixin\n- transformers\n- transformers.js\nrepo_url: https://github.com/ZhengPeng7/BiRefNet"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageSegmentation"", ""custom_class"": ""birefnet.BiRefNet"", ""pipeline_tag"": ""image-segmentation"", ""processor"": null}, ""_id"": ""6690ee4190a83f3e25f11393"", ""modelId"": ""ZhengPeng7/BiRefNet"", ""usedStorage"": 3099110132}",0,,0,,0,https://huggingface.co/onnx-community/BiRefNet-ONNX,1,,0,"InstantX/InstantCharacter, PramaLLC/BEN2, Tassawar/back_ground_removal, VAST-AI/MV-Adapter-I2MV-SDXL, VAST-AI/MV-Adapter-Img2Texture, VAST-AI/TripoSG, ZhengPeng7/BiRefNet_demo, huggingface/InferenceSupport/discussions/186, innova-ai/video-background-removal, jasperai/LBM_relighting, not-lain/background-removal, not-lain/locally-compatible-BG-removal, svjack/video-background-removal",13
|
ControlNet_finetunes_20250424_143024.csv_finetunes_20250424_143024.csv
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
lllyasviel/ControlNet,"---
|
| 3 |
+
license: openrail
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
This is the pretrained weights and some other detector weights of ControlNet.
|
| 7 |
+
|
| 8 |
+
See also: https://github.com/lllyasviel/ControlNet
|
| 9 |
+
|
| 10 |
+
# Description of Files
|
| 11 |
+
|
| 12 |
+
ControlNet/models/control_sd15_canny.pth
|
| 13 |
+
|
| 14 |
+
- The ControlNet+SD1.5 model to control SD using canny edge detection.
|
| 15 |
+
|
| 16 |
+
ControlNet/models/control_sd15_depth.pth
|
| 17 |
+
|
| 18 |
+
- The ControlNet+SD1.5 model to control SD using Midas depth estimation.
|
| 19 |
+
|
| 20 |
+
ControlNet/models/control_sd15_hed.pth
|
| 21 |
+
|
| 22 |
+
- The ControlNet+SD1.5 model to control SD using HED edge detection (soft edge).
|
| 23 |
+
|
| 24 |
+
ControlNet/models/control_sd15_mlsd.pth
|
| 25 |
+
|
| 26 |
+
- The ControlNet+SD1.5 model to control SD using M-LSD line detection (will also work with traditional Hough transform).
|
| 27 |
+
|
| 28 |
+
ControlNet/models/control_sd15_normal.pth
|
| 29 |
+
|
| 30 |
+
- The ControlNet+SD1.5 model to control SD using normal map. Best to use the normal map generated by that Gradio app. Other normal maps may also work as long as the direction is correct (left looks red, right looks blue, up looks green, down looks purple).
|
| 31 |
+
|
| 32 |
+
ControlNet/models/control_sd15_openpose.pth
|
| 33 |
+
|
| 34 |
+
- The ControlNet+SD1.5 model to control SD using OpenPose pose detection. Directly manipulating pose skeleton should also work.
|
| 35 |
+
|
| 36 |
+
ControlNet/models/control_sd15_scribble.pth
|
| 37 |
+
|
| 38 |
+
- The ControlNet+SD1.5 model to control SD using human scribbles. The model is trained with boundary edges with very strong data augmentation to simulate boundary lines similar to that drawn by human.
|
| 39 |
+
|
| 40 |
+
ControlNet/models/control_sd15_seg.pth
|
| 41 |
+
|
| 42 |
+
- The ControlNet+SD1.5 model to control SD using semantic segmentation. The protocol is ADE20k.
|
| 43 |
+
|
| 44 |
+
ControlNet/annotator/ckpts/body_pose_model.pth
|
| 45 |
+
|
| 46 |
+
- Third-party model: Openpose’s pose detection model.
|
| 47 |
+
|
| 48 |
+
ControlNet/annotator/ckpts/hand_pose_model.pth
|
| 49 |
+
|
| 50 |
+
- Third-party model: Openpose’s hand detection model.
|
| 51 |
+
|
| 52 |
+
ControlNet/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt
|
| 53 |
+
|
| 54 |
+
- Third-party model: Midas depth estimation model.
|
| 55 |
+
|
| 56 |
+
ControlNet/annotator/ckpts/mlsd_large_512_fp32.pth
|
| 57 |
+
|
| 58 |
+
- Third-party model: M-LSD detection model.
|
| 59 |
+
|
| 60 |
+
ControlNet/annotator/ckpts/mlsd_tiny_512_fp32.pth
|
| 61 |
+
|
| 62 |
+
- Third-party model: M-LSD’s another smaller detection model (we do not use this one).
|
| 63 |
+
|
| 64 |
+
ControlNet/annotator/ckpts/network-bsds500.pth
|
| 65 |
+
|
| 66 |
+
- Third-party model: HED boundary detection.
|
| 67 |
+
|
| 68 |
+
ControlNet/annotator/ckpts/upernet_global_small.pth
|
| 69 |
+
|
| 70 |
+
- Third-party model: Uniformer semantic segmentation.
|
| 71 |
+
|
| 72 |
+
ControlNet/training/fill50k.zip
|
| 73 |
+
|
| 74 |
+
- The data for our training tutorial.
|
| 75 |
+
|
| 76 |
+
# Related Resources
|
| 77 |
+
|
| 78 |
+
Special Thank to the great project - [Mikubill' A1111 Webui Plugin](https://github.com/Mikubill/sd-webui-controlnet) !
|
| 79 |
+
|
| 80 |
+
We also thank Hysts for making [Gradio](https://github.com/gradio-app/gradio) demo in [Hugging Face Space](https://huggingface.co/spaces/hysts/ControlNet) as well as more than 65 models in that amazing [Colab list](https://github.com/camenduru/controlnet-colab)!
|
| 81 |
+
|
| 82 |
+
Thank haofanwang for making [ControlNet-for-Diffusers](https://github.com/haofanwang/ControlNet-for-Diffusers)!
|
| 83 |
+
|
| 84 |
+
We also thank all authors for making Controlnet DEMOs, including but not limited to [fffiloni](https://huggingface.co/spaces/fffiloni/ControlNet-Video), [other-model](https://huggingface.co/spaces/hysts/ControlNet-with-other-models), [ThereforeGames](https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/7784), [RamAnanth1](https://huggingface.co/spaces/RamAnanth1/ControlNet), etc!
|
| 85 |
+
|
| 86 |
+
# Misuse, Malicious Use, and Out-of-Scope Use
|
| 87 |
+
|
| 88 |
+
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
|
| 89 |
+
|
| 90 |
+
","{""id"": ""lllyasviel/ControlNet"", ""author"": ""lllyasviel"", ""sha"": ""e78a8c4a5052a238198043ee5c0cb44e22abb9f7"", ""last_modified"": ""2023-02-25 05:57:36+00:00"", ""created_at"": ""2023-02-08 18:51:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 3695, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:openrail"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: openrail"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/body_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/dpt_hybrid-midas-501f0c75.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/hand_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/mlsd_large_512_fp32.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/mlsd_tiny_512_fp32.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/network-bsds500.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='annotator/ckpts/upernet_global_small.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_canny.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_depth.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_hed.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_mlsd.pth', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='models/control_sd15_normal.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_openpose.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_scribble.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/control_sd15_seg.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training/fill50k.zip', size=None, blob_id=None, lfs=None)""], ""spaces"": [""InstantX/InstantID"", ""microsoft/HuggingGPT"", ""AI4Editing/MagicQuill"", ""hysts/ControlNet"", ""multimodalart/flux-style-shaping"", ""microsoft/visual_chatgpt"", ""Anonymous-sub/Rerender"", ""fffiloni/ControlNet-Video"", ""PAIR/Text2Video-Zero"", ""hysts/ControlNet-with-Anything-v4"", ""modelscope/AnyText"", ""Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro"", ""RamAnanth1/ControlNet"", ""georgefen/Face-Landmark-ControlNet"", ""Yuliang/ECON"", ""diffusers/controlnet-openpose"", ""shi-labs/Prompt-Free-Diffusion"", ""mikonvergence/theaTRON"", ""fotographerai/Zen-Style-Shape"", ""ozgurkara/RAVE"", ""fffiloni/video2openpose2"", ""radames/LayerDiffuse-gradio-unofficial"", ""broyang/anime-ai"", ""feishen29/IMAGDressing-v1"", ""ginipick/StyleGen"", ""Fucius/OMG-InstantID"", ""vumichien/canvas_controlnet"", ""Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0"", ""fffiloni/ControlVideo"", ""Fucius/OMG"", ""Qdssa/good_upscaler"", ""visionMaze/Magic-Me"", ""carloscar/stable-diffusion-webui-controlnet-docker"", ""Superlang/ImageProcessor"", ""Robert001/UniControl-Demo"", ""dreamer-technoland/object-to-object-replace"", ""fantos/flxcontrol"", ""tombetthauser/astronaut-horse-concept-loader"", ""ddosxd/InstantID"", ""multimodalart/InstantID-FaceID-6M"", ""azhan77168/mq"", ""rupeshs/fastsdcpu"", ""EPFL-VILAB/ViPer"", ""abidlabs/ControlNet"", ""RamAnanth1/roomGPT"", ""yuan2023/Stable-Diffusion-ControlNet-WebUI"", ""wenkai/FAPM_demo"", ""ginipick/Fashion-Style"", ""abhishek/sketch-to-image"", 
""wondervictor/ControlAR"", ""yuan2023/stable-diffusion-webui-controlnet-docker"", ""yslan/3DEnhancer"", ""model2/advanceblur"", ""taesiri/HuggingGPT-Lite"", ""salahIguiliz/ControlLogoNet"", ""charlieguo610/InstantID"", ""aki-0421/character-360"", ""JoPmt/Multi-SD_Cntrl_Cny_Pse_Img2Img"", ""PKUWilliamYang/FRESCO"", ""JoPmt/Img2Img_SD_Control_Canny_Pose_Multi"", ""nowsyn/AnyControl"", ""Potre1qw/jorag"", ""waloneai/InstantAIPortrait"", ""Pie31415/control-animation"", ""RamAnanth1/T2I-Adapter"", ""svjack/ControlNet-Pose-Chinese"", ""bobu5/SD-webui-controlnet-docker"", ""soonyau/visconet"", ""LiuZichen/DrawNGuess"", ""meowingamogus69/stable-diffusion-webui-controlnet-docker"", ""wchai/StableVideo"", ""egg22314/object-to-object-replace"", ""dreamer-technoland/object-to-object-replace-1"", ""Etrwy/cucumberUpscaler"", ""VincentZB/Stable-Diffusion-ControlNet-WebUI"", ""ysharma/ControlNet_Image_Comparison"", ""Thaweewat/ControlNet-Architecture"", ""shellypeng/Anime-Pack"", ""bewizz/SD3_Batch_Imagine"", ""Freak-ppa/obj_rem_inpaint_outpaint"", ""addsw11/obj_rem_inpaint_outpaint2"", ""briaai/BRIA-2.3-ControlNet-Pose"", ""svjack/ControlNet-Canny-Chinese-df"", ""rzzgate/Stable-Diffusion-ControlNet-WebUI"", ""JFoz/CoherentControl"", ""ysharma/visual_chatgpt_dummy"", ""AIFILMS/ControlNet-Video"", ""SUPERSHANKY/ControlNet_Colab"", ""kirch/Text2Video-Zero"", ""Alfasign/visual_chatgpt"", ""Yabo/ControlVideo"", ""ikechan8370/cp-extra"", ""brunvelop/ComfyUI"", ""SD-online/Fooocus-Docker"", ""parsee-mizuhashi/mangaka"", ""jcudit/InstantID2"", ""Etrwy/universal_space_test"", ""nftnik/Redux"", ""pandaphd/generative_photography"", ""ccarr0807/HuggingGPT""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-02-25 05:57:36+00:00"", ""cardData"": ""license: openrail"", ""transformersInfo"": null, ""_id"": ""63e3ef298de575a15a63c2b1"", ""modelId"": ""lllyasviel/ControlNet"", ""usedStorage"": 
47039764846}",0,,0,,0,,0,,0,"AI4Editing/MagicQuill, InstantX/InstantID, RamAnanth1/ControlNet, Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro, broyang/anime-ai, feishen29/IMAGDressing-v1, fffiloni/ControlNet-Video, fotographerai/Zen-Style-Shape, ginipick/StyleGen, huggingface/InferenceSupport/discussions/new?title=lllyasviel/ControlNet&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blllyasviel%2FControlNet%5D(%2Flllyasviel%2FControlNet)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, hysts/ControlNet, hysts/ControlNet-with-other-models, modelscope/AnyText, multimodalart/flux-style-shaping, ozgurkara/RAVE, radames/LayerDiffuse-gradio-unofficial",16
|
DeepCoder-14B-Preview_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,551 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
agentica-org/DeepCoder-14B-Preview,"---
|
| 3 |
+
license: mit
|
| 4 |
+
library_name: transformers
|
| 5 |
+
datasets:
|
| 6 |
+
- PrimeIntellect/verifiable-coding-problems
|
| 7 |
+
- likaixin/TACO-verified
|
| 8 |
+
- livecodebench/code_generation_lite
|
| 9 |
+
language:
|
| 10 |
+
- en
|
| 11 |
+
base_model:
|
| 12 |
+
- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
|
| 13 |
+
pipeline_tag: text-generation
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+
<div align=""center"">
|
| 17 |
+
<span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
|
| 18 |
+
<div>
|
| 19 |
+
🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
|
| 20 |
+
</div>
|
| 21 |
+
</div>
|
| 22 |
+
<br>
|
| 23 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 24 |
+
<a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
|
| 25 |
+
<img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 26 |
+
</a>
|
| 27 |
+
<a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
|
| 28 |
+
<img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 29 |
+
</a>
|
| 30 |
+
<a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
|
| 31 |
+
<img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 32 |
+
</a>
|
| 33 |
+
<a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
|
| 34 |
+
<img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
|
| 35 |
+
</a>
|
| 36 |
+
<a href=""https://www.together.ai"" style=""margin: 2px;"">
|
| 37 |
+
<img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk
3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2Fr
T2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG
6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRtYPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
|
| 38 |
+
</a>
|
| 39 |
+
</div>
|
| 40 |
+
</div>
|
| 41 |
+
</div>
|
| 42 |
+
|
| 43 |
+
## DeepCoder Overview
|
| 44 |
+
DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distilled-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), representing a 8% improvement over the base model (53%) and achieving similar performance to OpenAI's o3-mini with just 14B parameters.
|
| 45 |
+
|
| 46 |
+
<div style=""margin: 0 auto;"">
|
| 47 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
|
| 48 |
+
</div>
|
| 49 |
+
|
| 50 |
+
## Data
|
| 51 |
+
Our training dataset consists of approximately 24K unique problem-tests pairs compiled from:
|
| 52 |
+
- Taco-Verified
|
| 53 |
+
- PrimeIntellect SYNTHETIC-1
|
| 54 |
+
- LiveCodeBench v5 (5/1/23-7/31/24)
|
| 55 |
+
|
| 56 |
+
## Training Recipe
|
| 57 |
+
|
| 58 |
+
Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
|
| 59 |
+
|
| 60 |
+
### GRPO+
|
| 61 |
+
|
| 62 |
+
We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
|
| 63 |
+
|
| 64 |
+
- **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
|
| 65 |
+
- **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
|
| 66 |
+
- **No KL Loss:** Eliminating KL loss prevents the LLM from staying within trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
|
| 67 |
+
- **Overlong Filtering** **(from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
|
| 68 |
+
- **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy.
|
| 69 |
+
|
| 70 |
+
### Iterative Context Lengthening
|
| 71 |
+
|
| 72 |
+
Our original `Deepscaler-1.5B-Preview` scaled long context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `Deepcoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
|
| 73 |
+
|
| 74 |
+
DeepCoder generalizes better to long contexts than the base distilled model, due to DAPO's overlong filtering. However, it's longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
|
| 75 |
+
|
| 76 |
+
| **Model** | **16K** | **32K** | **64K** |
|
| 77 |
+
| --- | --- | --- | --- |
|
| 78 |
+
| **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
|
| 79 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
|
| 80 |
+
|
| 81 |
+
A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
|
| 82 |
+
|
| 83 |
+
## Evaluation
|
| 84 |
+
|
| 85 |
+
We evaluate `Deepcoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
|
| 86 |
+
|
| 87 |
+
| **Model** | LCB (v5)(8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
|
| 88 |
+
| --- | --- | --- | --- | --- |
|
| 89 |
+
| **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
|
| 90 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
|
| 91 |
+
| **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
|
| 92 |
+
| **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
|
| 93 |
+
| **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
|
| 94 |
+
| **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
|
| 95 |
+
| **Llama-4-Behemoth** | 49.4 | - | - | - |
|
| 96 |
+
|
| 97 |
+
## Serving DeepCoder
|
| 98 |
+
Our model can be served using popular high-performance inference systems:
|
| 99 |
+
- vLLM
|
| 100 |
+
- Hugging Face Text Generation Inference (TGI)
|
| 101 |
+
- SGLang
|
| 102 |
+
- TensorRT-LLM
|
| 103 |
+
|
| 104 |
+
All these systems support the OpenAI Chat Completions API format.
|
| 105 |
+
|
| 106 |
+
### Usage Recommendations
|
| 107 |
+
Our usage recommendations are similar to those of R1 and R1 Distill series:
|
| 108 |
+
1. Avoid adding a system prompt; all instructions should be contained within the user prompt.
|
| 109 |
+
2. `temperature = 0.6`
|
| 110 |
+
3. `top_p = 0.95`
|
| 111 |
+
4. This model performs best with `max_tokens` set to at least `64000`
|
| 112 |
+
|
| 113 |
+
## License
|
| 114 |
+
This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
|
| 115 |
+
We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
|
| 116 |
+
This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
|
| 117 |
+
|
| 118 |
+
## Acknowledgement
|
| 119 |
+
- Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
|
| 120 |
+
- Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
|
| 121 |
+
- Our work is done as part of [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
|
| 122 |
+
|
| 123 |
+
## Citation
|
| 124 |
+
```bibtex
|
| 125 |
+
@misc{deepcoder2025,
|
| 126 |
+
title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
|
| 127 |
+
author={Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, Ion Stoica},
|
| 128 |
+
howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
|
| 129 |
+
note={Notion Blog},
|
| 130 |
+
year={2025}
|
| 131 |
+
}
|
| 132 |
+
```","{""id"": ""agentica-org/DeepCoder-14B-Preview"", ""author"": ""agentica-org"", ""sha"": ""b8d891051ba35b18545cc6ee14817a6f6b06b186"", ""last_modified"": ""2025-04-09 21:12:07+00:00"", ""created_at"": ""2025-04-07 10:39:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 39699, ""downloads_all_time"": null, ""likes"": 607, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"", ""base_model:finetune:deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if 
message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + 
message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00011-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00012.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='together-ai-branding-lightOnDark.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""hadadrjt/ai"", ""IamOmer4148/Test"", ""sierrafr/test"", ""naxwinn/Aura-2""], ""safetensors"": {""parameters"": {""F32"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-09 21:12:07+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67f3ab51e59f071a85391aa2"", ""modelId"": ""agentica-org/DeepCoder-14B-Preview"", ""usedStorage"": 59091623642}",0,"https://huggingface.co/EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth, https://huggingface.co/EpistemeAI/SAI-DeepCoder-14B-Preview-v1.0, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-bf16, https://huggingface.co/Gapeleon/DeepCoder-14B-Preview-int4-awq-ov, https://huggingface.co/secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5, https://huggingface.co/Apel-sin/deepcoder-14B-preview-exl2, https://huggingface.co/wasim845/dfgh, 
https://huggingface.co/rieon/DeepCoder-14B-Preview-Suger",8,,0,"https://huggingface.co/bartowski/agentica-org_DeepCoder-14B-Preview-GGUF, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-4bit, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-6bit, https://huggingface.co/mlx-community/DeepCoder-14B-Preview-8bit, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q4_K_M-GGUF, https://huggingface.co/lmstudio-community/DeepCoder-14B-Preview-GGUF, https://huggingface.co/DevQuasar/agentica-org.DeepCoder-14B-Preview-GGUF, https://huggingface.co/Joumdane/DeepCoder-14B-Preview-GGUF, https://huggingface.co/miike-ai/deepcoder-14b-fp8, https://huggingface.co/cgus/DeepCoder-14B-Preview-exl2, https://huggingface.co/numen-tech/DeepCoder-14B-Preview-GPTQ-Int4, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q6_K-GGUF, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-8Bit, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q8_0-GGUF, https://huggingface.co/achitech/DeepCoder-14B-Preview-Q3_K_M-GGUF, https://huggingface.co/mradermacher/DeepCoder-14B-Preview-GGUF, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-2Bit, https://huggingface.co/justinmeans/DeepCoder-14B-Preview-mlx-4Bit, https://huggingface.co/okamototk/DeepCoder-14B-Preview-imatrix-GGUF, https://huggingface.co/noneUsername/DeepCoder-14B-Preview-W8A8, https://huggingface.co/WSDW/DeepCoder-14B-Preview-Q3_K_M-GGUF, https://huggingface.co/WSDW/DeepCoder-14B-Preview-Q2_K-GGUF, https://huggingface.co/BenevolenceMessiah/DeepCoder-14B-Preview-Q8_0-GGUF, https://huggingface.co/EpistemeAI/DeepCoder-14B-Preview-GGUF, https://huggingface.co/gercamjr/DeepCoder-14B-Preview-Q4_K_M-GGUF, https://huggingface.co/tensorblock/agentica-org_DeepCoder-14B-Preview-GGUF",26,"https://huggingface.co/YOYO-AI/YOYO-O1-14B, https://huggingface.co/mergekit-community/mergekit-sce-sudfgqi, https://huggingface.co/spacematt/Qwen2.5-Channel-Coder-14B-Instruct",3,"IamOmer4148/Test, hadadrjt/ai, 
huggingface/InferenceSupport/discussions/518, naxwinn/Aura-2, sierrafr/test",5
|
| 133 |
+
EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth,"---
|
| 134 |
+
base_model: agentica-org/DeepCoder-14B-Preview
|
| 135 |
+
tags:
|
| 136 |
+
- text-generation-inference
|
| 137 |
+
- transformers
|
| 138 |
+
- unsloth
|
| 139 |
+
- qwen2
|
| 140 |
+
- trl
|
| 141 |
+
license: mit
|
| 142 |
+
language:
|
| 143 |
+
- en
|
| 144 |
+
---
|
| 145 |
+
|
| 146 |
+
## Please see the improved model - [SIA DeepCoder 14B model](https://huggingface.co/EpistemeAI/SA-DeepCoder-14B-Preview-unsloth-v1.0)
|
| 147 |
+
|
| 148 |
+
## This model is supervised fine tuning with [gretelai's safety and alignment](https://huggingface.co/datasets/gretelai/gretel-safety-alignment-en-v1) with [Unsloth](https://github.com/unslothai/unsloth)
|
| 149 |
+
|
| 150 |
+
## Episteme alignment and safety technique
|
| 151 |
+
|
| 152 |
+
### To enable thinking, add `<think>` to your prompt
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
## Model Card
|
| 156 |
+
|
| 157 |
+
<div align=""center"">
|
| 158 |
+
<span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
|
| 159 |
+
<div>
|
| 160 |
+
🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
|
| 161 |
+
</div>
|
| 162 |
+
</div>
|
| 163 |
+
<br>
|
| 164 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 165 |
+
<a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
|
| 166 |
+
<img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 167 |
+
</a>
|
| 168 |
+
<a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
|
| 169 |
+
<img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 170 |
+
</a>
|
| 171 |
+
<a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
|
| 172 |
+
<img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 173 |
+
</a>
|
| 174 |
+
<a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
|
| 175 |
+
<img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
|
| 176 |
+
</a>
|
| 177 |
+
<a href=""https://www.together.ai"" style=""margin: 2px;"">
|
| 178 |
+
<img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk
3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2Fr
T2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG
6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRtYPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
|
| 179 |
+
</a>
|
| 180 |
+
</div>
|
| 181 |
+
</div>
|
| 182 |
+
</div>
|
| 183 |
+
|
| 184 |
+
## DeepCoder Overview
|
| 185 |
+
DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distilled-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), representing an 8% improvement over the base model (53%) and achieving similar performance to OpenAI's o3-mini with just 14B parameters.
|
| 186 |
+
|
| 187 |
+
<div style=""margin: 0 auto;"">
|
| 188 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
|
| 189 |
+
</div>
|
| 190 |
+
|
| 191 |
+
## Data
|
| 192 |
+
Our training dataset consists of approximately 24K unique problem-tests pairs compiled from:
|
| 193 |
+
- Taco-Verified
|
| 194 |
+
- PrimeIntellect SYNTHETIC-1
|
| 195 |
+
- LiveCodeBench v5 (5/1/23-7/31/24)
|
| 196 |
+
|
| 197 |
+
## Training Recipe
|
| 198 |
+
|
| 199 |
+
Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
|
| 200 |
+
|
| 201 |
+
### GRPO+
|
| 202 |
+
|
| 203 |
+
We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
|
| 204 |
+
|
| 205 |
+
- **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
|
| 206 |
+
- **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
|
| 207 |
+
- **No KL Loss:** Eliminating the KL loss frees the LLM from being constrained to the trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
|
| 208 |
+
- **Overlong Filtering** **(from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
|
| 209 |
+
- **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy.
|
| 210 |
+
|
| 211 |
+
### Iterative Context Lengthening
|
| 212 |
+
|
| 213 |
+
Our original `Deepscaler-1.5B-Preview` scaled long context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `Deepcoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
|
| 214 |
+
|
| 215 |
+
DeepCoder generalizes better to long contexts than the base distilled model, due to DAPO's overlong filtering. However, its longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
|
| 216 |
+
|
| 217 |
+
| **Model** | **16K** | **32K** | **64K** |
|
| 218 |
+
| --- | --- | --- | --- |
|
| 219 |
+
| **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
|
| 220 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
|
| 221 |
+
|
| 222 |
+
A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
|
| 223 |
+
|
| 224 |
+
## Evaluation
|
| 225 |
+
|
| 226 |
+
We evaluate `Deepcoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
|
| 227 |
+
|
| 228 |
+
| **Model** | LCB (v5)(8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
|
| 229 |
+
| --- | --- | --- | --- | --- |
|
| 230 |
+
| **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
|
| 231 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
|
| 232 |
+
| **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
|
| 233 |
+
| **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
|
| 234 |
+
| **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
|
| 235 |
+
| **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
|
| 236 |
+
| **Llama-4-Behemoth** | 49.4 | - | - | - |
|
| 237 |
+
|
| 238 |
+
## Serving DeepCoder
|
| 239 |
+
Our model can be served using popular high-performance inference systems:
|
| 240 |
+
- vLLM
|
| 241 |
+
- Hugging Face Text Generation Inference (TGI)
|
| 242 |
+
- SGLang
|
| 243 |
+
- TensorRT-LLM
|
| 244 |
+
|
| 245 |
+
All these systems support the OpenAI Chat Completions API format.
|
| 246 |
+
|
| 247 |
+
### Usage Recommendations
|
| 248 |
+
Our usage recommendations are similar to those of R1 and R1 Distill series:
|
| 249 |
+
1. Avoid adding a system prompt; all instructions should be contained within the user prompt.
|
| 250 |
+
2. `temperature = 0.6`
|
| 251 |
+
3. `top_p = 0.95`
|
| 252 |
+
4. This model performs best with `max_tokens` set to at least `64000`
|
| 253 |
+
|
| 254 |
+
## EpistemeAI Training script
|
| 255 |
+
[Fine tune DeepCoder with unsloth](https://colab.research.google.com/drive/1If_NwF2aNvQrG7lyCClhJIFVbdHhMN8c?usp=sharing)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
## License
|
| 259 |
+
This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
|
| 260 |
+
We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
|
| 261 |
+
This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
|
| 262 |
+
|
| 263 |
+
## Acknowledgement
|
| 264 |
+
- Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
|
| 265 |
+
- Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
|
| 266 |
+
- Our work is done as part of [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
|
| 267 |
+
|
| 268 |
+
## Citation
|
| 269 |
+
```bibtex
|
| 270 |
+
@misc{deepcoder2025,
|
| 271 |
+
title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
|
| 272 |
+
author={Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, Ion Stoica},
|
| 273 |
+
howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
|
| 274 |
+
note={Notion Blog},
|
| 275 |
+
year={2025}
|
| 276 |
+
}
|
| 277 |
+
```
|
| 278 |
+
# Uploaded model
|
| 279 |
+
|
| 280 |
+
- **Developed by:** EpistemeAI
|
| 281 |
+
- **License:** apache-2.0
|
| 282 |
+
- **Finetuned from model :** agentica-org/DeepCoder-14B-Preview
|
| 283 |
+
|
| 284 |
+
This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
|
| 285 |
+
|
| 286 |
+
[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
|
| 287 |
+
|
| 288 |
+
","{""id"": ""EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth"", ""author"": ""EpistemeAI"", ""sha"": ""5a7aeb6c1af19f92861f9a3505013ba176361618"", ""last_modified"": ""2025-04-15 14:50:09+00:00"", ""created_at"": ""2025-04-09 22:09:05+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlanguage:\n- en\nlicense: mit\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- qwen2\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = 
false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool 
%}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<|vision_pad|>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-15 14:50:09+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlanguage:\n- en\nlicense: mit\ntags:\n- text-generation-inference\n- 
transformers\n- unsloth\n- qwen2\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67f6f001f018b75ed18178f6"", ""modelId"": ""EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth"", ""usedStorage"": 29551556738}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=EpistemeAI/DeepCoder-14B-Preview-safety-alignment-unsloth&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BEpistemeAI%2FDeepCoder-14B-Preview-safety-alignment-unsloth%5D(%2FEpistemeAI%2FDeepCoder-14B-Preview-safety-alignment-unsloth)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 289 |
+
https://huggingface.co/EpistemeAI/SAI-DeepCoder-14B-Preview-v1.0,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 290 |
+
mlx-community/DeepCoder-14B-Preview-bf16,"---
|
| 291 |
+
license: mit
|
| 292 |
+
library_name: mlx
|
| 293 |
+
datasets:
|
| 294 |
+
- PrimeIntellect/verifiable-coding-problems
|
| 295 |
+
- likaixin/TACO-verified
|
| 296 |
+
- livecodebench/code_generation_lite
|
| 297 |
+
language:
|
| 298 |
+
- en
|
| 299 |
+
base_model: agentica-org/DeepCoder-14B-Preview
|
| 300 |
+
pipeline_tag: text-generation
|
| 301 |
+
tags:
|
| 302 |
+
- mlx
|
| 303 |
+
---
|
| 304 |
+
|
| 305 |
+
# mlx-community/DeepCoder-14B-Preview-bf16
|
| 306 |
+
|
| 307 |
+
This model [mlx-community/DeepCoder-14B-Preview-bf16](https://huggingface.co/mlx-community/DeepCoder-14B-Preview-bf16) was
|
| 308 |
+
converted to MLX format from [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview)
|
| 309 |
+
using mlx-lm version **0.22.3**.
|
| 310 |
+
|
| 311 |
+
## Use with mlx
|
| 312 |
+
|
| 313 |
+
```bash
|
| 314 |
+
pip install mlx-lm
|
| 315 |
+
```
|
| 316 |
+
|
| 317 |
+
```python
|
| 318 |
+
from mlx_lm import load, generate
|
| 319 |
+
|
| 320 |
+
model, tokenizer = load(""mlx-community/DeepCoder-14B-Preview-bf16"")
|
| 321 |
+
|
| 322 |
+
prompt = ""hello""
|
| 323 |
+
|
| 324 |
+
if tokenizer.chat_template is not None:
|
| 325 |
+
messages = [{""role"": ""user"", ""content"": prompt}]
|
| 326 |
+
prompt = tokenizer.apply_chat_template(
|
| 327 |
+
messages, add_generation_prompt=True
|
| 328 |
+
)
|
| 329 |
+
|
| 330 |
+
response = generate(model, tokenizer, prompt=prompt, verbose=True)
|
| 331 |
+
```
|
| 332 |
+
","{""id"": ""mlx-community/DeepCoder-14B-Preview-bf16"", ""author"": ""mlx-community"", ""sha"": ""b36e3085dca80389d5f40f81cbf1341b34864fe1"", ""last_modified"": ""2025-04-08 21:51:04+00:00"", ""created_at"": ""2025-04-08 20:49:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 159, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""mlx"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""mlx"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: mlx\nlicense: mit\npipeline_tag: text-generation\ntags:\n- mlx"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor 
%}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if 
ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-08 21:51:04+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: 
mlx\nlicense: mit\npipeline_tag: text-generation\ntags:\n- mlx"", ""transformersInfo"": null, ""_id"": ""67f58be237c0eed4d3219721"", ""modelId"": ""mlx-community/DeepCoder-14B-Preview-bf16"", ""usedStorage"": 29551556644}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mlx-community/DeepCoder-14B-Preview-bf16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlx-community%2FDeepCoder-14B-Preview-bf16%5D(%2Fmlx-community%2FDeepCoder-14B-Preview-bf16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 333 |
+
Gapeleon/DeepCoder-14B-Preview-int4-awq-ov,"---
|
| 334 |
+
license: mit
|
| 335 |
+
base_model:
|
| 336 |
+
- agentica-org/DeepCoder-14B-Preview
|
| 337 |
+
---
|
| 338 |
+
|
| 339 |
+
# OpenVINO quant of [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview-int4-awq-ov)
|
| 340 |
+
|
| 341 |
+
- Requires 12GB of VRAM (eg. Intel Arc A770 / B580).
|
| 342 |
+
- Won't fit on 8GB A750
|
| 343 |
+
|
| 344 |
+
# Performance on an A770 with [OpenArc](https://github.com/SearchSavior/OpenArc)
|
| 345 |
+
|
| 346 |
+
```
|
| 347 |
+
=== Streaming Performance ===
|
| 348 |
+
Total generation time: 65.078 seconds
|
| 349 |
+
Prompt evaluation: 1376 tokens in 0.841 seconds (1636.58 T/s)
|
| 350 |
+
Response generation: 982 tokens in (15.09 T/s)
|
| 351 |
+
```","{""id"": ""Gapeleon/DeepCoder-14B-Preview-int4-awq-ov"", ""author"": ""Gapeleon"", ""sha"": ""d47bf0f554c171f5ac75ec9b4f3ed9dee4b6e860"", ""last_modified"": ""2025-04-23 01:55:29+00:00"", ""created_at"": ""2025-04-09 11:06:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""openvino"", ""qwen2"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + 
'<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-23 01:55:29+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""67f654a18fd7052199edb79a"", ""modelId"": ""Gapeleon/DeepCoder-14B-Preview-int4-awq-ov"", ""usedStorage"": 16938244268}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Gapeleon/DeepCoder-14B-Preview-int4-awq-ov&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGapeleon%2FDeepCoder-14B-Preview-int4-awq-ov%5D(%2FGapeleon%2FDeepCoder-14B-Preview-int4-awq-ov)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 352 |
+
secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5,"---
|
| 353 |
+
library_name: transformers
|
| 354 |
+
license: mit
|
| 355 |
+
base_model: agentica-org/DeepCoder-14B-Preview
|
| 356 |
+
tags:
|
| 357 |
+
- llama-factory
|
| 358 |
+
- full
|
| 359 |
+
- generated_from_trainer
|
| 360 |
+
model-index:
|
| 361 |
+
- name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5
|
| 362 |
+
results: []
|
| 363 |
+
---
|
| 364 |
+
|
| 365 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 366 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 367 |
+
|
| 368 |
+
# DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5
|
| 369 |
+
|
| 370 |
+
This model is a fine-tuned version of [agentica-org/DeepCoder-14B-Preview](https://huggingface.co/agentica-org/DeepCoder-14B-Preview) on the DS-Noisy, the DS-Clean, the QWQ-Noisy and the QWQ-Clean datasets.
|
| 371 |
+
|
| 372 |
+
## Model description
|
| 373 |
+
|
| 374 |
+
More information needed
|
| 375 |
+
|
| 376 |
+
## Intended uses & limitations
|
| 377 |
+
|
| 378 |
+
More information needed
|
| 379 |
+
|
| 380 |
+
## Training and evaluation data
|
| 381 |
+
|
| 382 |
+
More information needed
|
| 383 |
+
|
| 384 |
+
## Training procedure
|
| 385 |
+
|
| 386 |
+
### Training hyperparameters
|
| 387 |
+
|
| 388 |
+
The following hyperparameters were used during training:
|
| 389 |
+
- learning_rate: 1e-05
|
| 390 |
+
- train_batch_size: 1
|
| 391 |
+
- eval_batch_size: 8
|
| 392 |
+
- seed: 42
|
| 393 |
+
- distributed_type: multi-GPU
|
| 394 |
+
- num_devices: 4
|
| 395 |
+
- gradient_accumulation_steps: 12
|
| 396 |
+
- total_train_batch_size: 48
|
| 397 |
+
- total_eval_batch_size: 32
|
| 398 |
+
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
| 399 |
+
- lr_scheduler_type: cosine
|
| 400 |
+
- lr_scheduler_warmup_ratio: 0.1
|
| 401 |
+
- num_epochs: 1.0
|
| 402 |
+
|
| 403 |
+
### Training results
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
### Framework versions
|
| 408 |
+
|
| 409 |
+
- Transformers 4.50.0
|
| 410 |
+
- Pytorch 2.6.0+cu124
|
| 411 |
+
- Datasets 3.1.0
|
| 412 |
+
- Tokenizers 0.21.0
|
| 413 |
+
","{""id"": ""secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""author"": ""secmlr"", ""sha"": ""0faf77cef5efe84efc0ad7057d697f5ef5d1b5eb"", ""last_modified"": ""2025-04-14 04:07:02+00:00"", ""created_at"": ""2025-04-14 00:59:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""llama-factory"", ""full"", ""generated_from_trainer"", ""conversational"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlibrary_name: transformers\nlicense: mit\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""results"": []}], ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for 
message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else 
%}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_log.jsonl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_loss.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-14 04:07:02+00:00"", ""cardData"": ""base_model: agentica-org/DeepCoder-14B-Preview\nlibrary_name: transformers\nlicense: mit\ntags:\n- llama-factory\n- full\n- generated_from_trainer\nmodel-index:\n- name: DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67fc5dd79808e92cc1f4a913"", ""modelId"": ""secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5"", ""usedStorage"": 29551564853}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=secmlr/DS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsecmlr%2FDS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5%5D(%2Fsecmlr%2FDS-Noisy_DS-Clean_QWQ-Noisy_QWQ-Clean_DeepCoder-14B-Preview_full_sft_1e-5)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 414 |
+
Apel-sin/deepcoder-14B-preview-exl2,"---
|
| 415 |
+
license: mit
|
| 416 |
+
library_name: transformers
|
| 417 |
+
datasets:
|
| 418 |
+
- PrimeIntellect/verifiable-coding-problems
|
| 419 |
+
- likaixin/TACO-verified
|
| 420 |
+
- livecodebench/code_generation_lite
|
| 421 |
+
language:
|
| 422 |
+
- en
|
| 423 |
+
base_model:
|
| 424 |
+
- agentica-org/DeepCoder-14B-Preview
|
| 425 |
+
pipeline_tag: text-generation
|
| 426 |
+
---
|
| 427 |
+
|
| 428 |
+
<div align=""center"">
|
| 429 |
+
<span style=""font-family: default; font-size: 1.5em;"">DeepCoder-14B-Preview</span>
|
| 430 |
+
<div>
|
| 431 |
+
🚀 Democratizing Reinforcement Learning for LLMs (RLLM) 🌟
|
| 432 |
+
</div>
|
| 433 |
+
</div>
|
| 434 |
+
<br>
|
| 435 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 436 |
+
<a href=""https://github.com/agentica-project/rllm"" style=""margin: 2px;"">
|
| 437 |
+
<img alt=""Code"" src=""https://img.shields.io/badge/RLLM-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 438 |
+
</a>
|
| 439 |
+
<a href=""https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51"" target=""_blank"" style=""margin: 2px;"">
|
| 440 |
+
<img alt=""Blog"" src=""https://img.shields.io/badge/Notion-%23000000.svg?style=for-the-badge&logo=notion&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 441 |
+
</a>
|
| 442 |
+
<a href=""https://x.com/Agentica_"" style=""margin: 2px;"">
|
| 443 |
+
<img alt=""X.ai"" src=""https://img.shields.io/badge/Agentica-white?style=for-the-badge&logo=X&logoColor=000&color=000&labelColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 444 |
+
</a>
|
| 445 |
+
<a href=""https://huggingface.co/agentica-org"" style=""margin: 2px;"">
|
| 446 |
+
<img alt=""Hugging Face"" src=""https://img.shields.io/badge/Agentica-fcd022?style=for-the-badge&logo=huggingface&logoColor=000&labelColor"" style=""display: inline-block; vertical-align: middle;""/>
|
| 447 |
+
</a>
|
| 448 |
+
<a href=""https://www.together.ai"" style=""margin: 2px;"">
|
| 449 |
+
<img alt=""Together AI"" src=""https://img.shields.io/badge/-Together_AI%20-white?style=for-the-badge&logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAAUAAAAFACAMAAAD6TlWYAAAC7lBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAPb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8Pb%2F8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADIBDt6AAAA%2BnRSTlMAAiQEKgcdKQwiHBMUzrtSUEmjhmZGH96yv8n1ey7nL3y1U%2FZfCaIo1WFg1NrcsHYrA2%2Fv80J%2BMeilnpefqKw%2B64%2BQlSbYZGVnBGkCV%2BxW8XJube6WJ9kZF9bSzBALRynPQfLhIjvwyBEAXOTLp3o%2FJA9Y9%2F7%2F9FEKDhIVFo4GHkVzjGz8icrHzY39iHR1i0M8Jj14LLZUvb7DxMXGoQEFeQcgSBOHaPvm4uOdRLMMqcDTLbcII0sNuVn4TKaRd6RKIeDd37Svra6xuLpaW17lXUAlHh8WGxUPIS4JGQoFECMsBg4gFwsRJRIrCC0oAycaFC8NMDIzMRgBsVt9rwAAD25JREFUeNrs3QVzG0kWB%2FA3ikHhZeYwk
3LMbF7GcBasOGw9hb3MzLyKw8zMzMx2rsokhySNY2mmR1N4xXV3a7sHuzWu%2BX2Ef3XPG%2Br3wOVyuVwul8vlcrlcLpfL5XK5dOlXOHTIvLnb27Xd%2FasBvrt9A%2B7r1bbdTTffcmuXwhzgTYwk6q%2BHr2RWlcclRYqXV2VeCV%2Bvr4mIkCJKZ83uc9NLC0fMD%2BD%2FCswfMfLtzh%2FeelsJcKJW19SG66KSTP6fLEXrwrU11Srw5Z8zbuzePcUBbFyg%2BPY7Pv%2Bs0A%2Bsid7ayiqFNEWp8iS9Ir%2F0Cl957bkRAaQLFLz15sBBfpbpJc7FJKKFFGuV4JJh6N573g6idr7vP%2F8iC9iI1NZJRDupLnlRBbaW3XjTfQHUJ3D8d68MBtsJiTNRold5uEYAdibkHgqiESMefGi9zfFVeCRihOS5LLJafV99XYxGddgwabKt8SmEyEQ%2FmRDlSoUA9gsNvKMDmhE8MC4L7OFtSYmPFmFlAmzm%2F9tfH0Oz8v6yFmxQ3SpOiY8eYTwjHew0%2BB9%2FD6B5ga4dLd%2FHQus0SnzaIrzWWgDb9P19MVqjw01dwFLpYYVYQymLgD1Kjj6J1umaHwLLqJfpy0%2FHIryqgg2mvetDKxXMnQMWEa9LxEpSqxZguS%2B%2BfA%2Bt9cZBi7ZxeqVMX376FqEnAtbyv7ISrTfspB%2FM82bq3r70BNMSYKV%2Bo4rQDiPzc8Csy1Fih%2BhVsE7o0cfQHnn%2FygJz6uNEJtaTSfy8ChYpnelDuxQ8HAIT1LOS8fwoCSq1FiVYcs%2FdaJ%2FgNhMJqrWKqfwoCSYtSTA08260U%2FBh47v4LDU%2F%2FgnmPOJDexX86ycwpp6yf80neB7M8o96DO2Wl2%2Bw%2FlLrh%2FlKYroW31qE9ht5EgzwRs3nR00wmgBTVq1EFtp2Ad0imdbkR0kwLQImTP8S2eg9B3QSKwkbHhPPxSUzAsjGe3P1luLrMmGklQpGjfIhKwU6C8llibBJUCaS4UKy6klkp0cX0CE9zcr8KAlei4Ahy36PLHXuBJqpYcJSmQBG3LIJWerQETS7qhCWlHowoMvfka2Va0Gjaus3MGUTp4NuWY8ja3%2FuB9q0IqydBt1eeQxZ%2B9MfQRNvnLAWT%2BiuIEuRvT9MBg3UlkQmbMmkUgB9cjsge8EbQIMLCmFPuQy6DPoGeVi9HqgED5EJazL5VAQ9Nm5CHjq0B6oKhZCUX4LrNyAfSycDhVBJZMKeTK4IoN26IPJRsAQoEhLhQ7kAmoV%2Bjbwspt0LniF8yKRMBa1%2B%2BSvkZVFfaFIkSngpvwha%2FQL56QNNqiX8%2FBs0mnMX8vPtBGiCWEf4iYmgzey7kZ8Rw6EJXonwo9SANn9GnuZCE84RnlqBJm3aIk8vFUKjxBjhKbMFaDHQhzy9%2BAI06pJEeJIS%2FGuwBn1M1WD%2BdXjNauSrdwk0Qq0kfHlUoFs7Evnq9TI0orqK8BVN1%2FIcvAn56vAKNCKhEDruz8NjkbdXOV4CKZJA1W8M8vbjT9CwMOGtDKjmjEbefpgCDRLqCB33p7kvipC3kc83UkOihLdohF5DfMjbiBf43UZTSPQq8vobyNsbudCgyzLhTT4PNK8hpmoZPkv4awU0y5G%2F1%2Fj90WG%2BDK9ATNX7mDDh71OgWYn83RHi9yRMkQY0I5G%2FOydDA4RPCX9RoMlD%2Fu6a0mCAMcJfHGh8yN%2BwqdAAMZPwJwFNB%2BRv5TRoQIs0wp%2FiiAB7TG%2B2Abor0L0GmiO5VdicuHsfaE7UfRIxJ80Rz8Kdnfss7L6NoShz8vvAWsLfOUe8kZ7o5DfSm1Pgm8gnTv4msqoIzXC%2FyrUZjWa434XdPxOoRZjiHjTD%2FTcGNm9Cg9y%2Fs9z%2FAymi1e4fqqZ4VPcfaQZnlQYGkacXP3H6X%2Fr
T2qIZ7jkR%2BAvy9L5jTyq5Z%2BUolBpHnNYc5PDTmubrsHtemOeJ9aJmcWI9tAV5%2BQ29Z4Kc%2Bj0TYHOQVwl5pVl07YD1h9EMt28MHOHUueihZtK5CArvRB4OTWkuvbNgYjGyF5wEGlQ4oXsbrF%2BK7O2fDBoIPPoHegQndLAc14w6WELot8jaX5pVD1Xo8iSy1WM8nzbcFMZbcf%2BLcR%2Fp7qBZayf0kYZly5GlzpOd3Mmcfy%2F9rl1AhwjTXvoXwaATDKc55Dp6mgP%2FeSLvZ4E%2B55wwTwSmr0Y2Djp6og3%2FmUrDhqbuTKWLYMqQ42i%2FkcNTdqpXeQ2Y4z82AO2Wl8txrpz5AkLRr38Q7TUiOydlJxueBfNCYzugnYKvOn62JkXpA3YmGPy8xPnTXanzhYP27d8PSvjPFzafH0Wov12VJC87ZSdcS2dVsEy%2FE8fRDgtznTFj3Tz%2FrT3QesOGO2bKv3mrVr%2BH1nrjjqFgiUilTGRr8%2FNEwHLTZ%2FisLR9vzgGLiOckYiWpVQuwQcmonmidZ3JDYBn1chohslXL79pVFWzh%2F2L5JrRG8fahYKlIWCHWUMoiYJtl%2F3wygOYFunabDBYTWmtdhJTlVy%2BAjfxPPP4YmpW3dTzYID0jTo%2BQEl88Ix1sFlqytAOacfe%2Bk1lgD29LxXiEMiFKZUIF%2By3L%2F6YYjSpu134w2EaouEKPsNH4rlwWgI0JEzcE0Qjfl19NAVsJFR6JGCF5LovAzrId2%2B8LoD6BBT8OGQy2E2rCUaJXebhGALZC9z%2FwUhC18%2F0wc1UWsBFJ1klEOymWvKgCe%2F7CW999xxdAusCI0R99PMgP7IiJczFJY3qtEiLw8tOckw88uKs40FR4xXuWzvzjVD%2BwJnqTlVUKaYpS5Ul6ReCsdOeOmVveKgq%2Bh%2F%2FvveCiu7Zvmz2rFDhRq2tqw7GoJJP%2FJ0vRWFmyplqF1NBv0KmTJz7fumX1d889%2B8yTzzz73Ldfbtm6bdS48RNygDcx3Xu1NqPMUxdLS7uWlhar85RlJK9600VIOf6c0mWDpj391NNtBg0uyfFDSlEF8T%2Ft3eFyqjwTwPGNiKq9eq%2BtqiCeoxZVEcRW4mK%2Bvc%2F5%2Bk7bBSDZOJPfFfwHWkEMG%2B%2BfXChwHMdxHMdxHMdxHMdxHMdxHIeV4yiR%2FyOUS6tHfBxP88Vse74N%2F7mdt7PF%2FHT8EFakbYg0XupvMZ%2Fddt%2F%2Ber27zebFX%2BXSfpQfD%2BMLsX7iMp4fc460%2BfgiqbSD1jSCGH1WXAV1v32OhOm0O1Yh9aUR0sNUYnVyekjBEH9eL%2B2mIY2gilmGdWXvhTKQNnpvkDYrBJgjNluJTchtIDSnBY3TNgLMUEGvbL4Qvhco3WkPbOS%2FNAEGjMay1bsEMjyCJsewXVo5HoFuH5P2b7OsJh9a0har1mn3tmkElXTzPlU%2FUd2nDfnTKH53b%2FTN%2FI7TZp2l7X3QZNPlO6X9jb1pJwUa5J8SuyQ%2Fc2vTFjl0zu%2F8vfrH2O8obdx52jaFjmmZ7HAdQQeOVw1pwxF0StNskd0GWtvsUIfsBB3SNt3m%2FgUtva1402jEfCXm%2BUBLjWkHBZ2gJ3zxHcG51JhWdnQENc%2BYk3O2vz%2F6CEJrBqYcyi9o6E172hJaMjJn876BRjYG0k7QiqFJr7tRo7SdgbSsgBaMzRoe%2BlCbfzWTlkILxqZdj%2FPaaWM0Y%2BtBUwbnrT8%2BoaZPY2kLBc2Ynfi%2FgVo2BtNO0JDRPSf6PtTgm0y7pNCI2KNJewWVqZnZNAH1md93J4HKEsNpb1Abw85P%2FQ%2Bo6GNoOs2H%2BgZo2gQqWqBpA6iNY%2Fe7EVRyXNm%2FMR%2FP%2FotjBRWokCFtK6AOrh1AA6ggkBxpG
6hFnImzzLUFKNv2uOec5Q9Qw3kO7N%2BgmT7LjB81asuU1hNQXSyRhyyAULClxVDdHh%2FI4YEzIMzY0vZQWZQhlyyFX6V8aasIqnoinwP86oB8nlBRfkM%2Btxx%2BIaZWpNGf03zkCH4xYk0r7PiuTljALz6R0wQqya%2FI6ZrTHy78acS%2FCSd5hB8dmdNGdlyDCQfiGmz7dVhtkddWWZvWU0D72CGv3Qf84O%2BFP40Wl8irLOAHBXtaDLQDoq0fgnPk9gTaHrnt4Qcz5Bba8T2OcBPwLUGnWXAnmGbILfP5Lm%2BELLX3WSp9v3q0IC0GytcDuT1O8K2TBWlLq58kEJfhOfJbACVEfhN7z20IlDPy2xM3WIymQBkiv57i%2ByZM6ANlh%2FymAr6hpshvB5QVoqW3q%2BKK%2FO5AkchvmMM38iHyk0ApkV%2Ffg294feRXugPoDiCr0n0GtiPdVbid%2BwvfB4op8svcN5F2%2Bu67cDvTV34aM0F%2B4Ss%2FDzzYcW4JSwse%2Byav%2FETa4t9ERhakBS%2F9q5wFaRH%2F6kDaNbf3d2EPXuAyvLd30UQItCdyO9i7bOf5EquzYnvTgpdeH8iflvlAUz3kZf8KVcs%2FBJ%2F2rl1cQxWFvUvhR8xpBVThDfnvAu28SR16UMkEOS3sfdQxgGri0tp%2Fk0Lac39l6T%2FKLbd2AfLVg4rW9t7rPy24BtOiFXJZRda%2BTL%2F6A1Wp0N7BBHu2tFBBZUGJPGRs7QPfMrB9cBExnIV7pM1ZQA0nrvFA9qYlUEc%2B5R9QZddYrymdxn%2Bey5O9g%2BUSqEf0rB3SJ7YMaT0BNRUMEywLa9NkDHWpdzRtYO9413cFtaUXw6NyL76VA4abj%2BL%2BMjys%2BcvaEdePJTQhxmhSKGqkhWjSWEAj0cXagfWpybRdBA0lpbktExJrN5oo36ApNUFTJqpm2gJNGShozOuhGT3P2rSzBy1EfSMbF%2FVTqC01lBZBK%2FHK2q2zisxA2iqGlhKpf%2FO2pGHaXXuafOPfGZKMLJeMO0MSaXNoTz1LvRtYPhXftqlE2lpBB9SayOQ6fgDqqTXtk07jzKSPH00dpL60tbJ9h%2Bb2%2BzODWt7tSKM34tZhlUBrSaYn7Q06Ffc1bKXfj6EDhQ1ptOhcP5OI7EXQibTXedo5gs55gxK7VE68ztImstu0gQcaqGSH%2BOjqHF8S1WXapcO03ZsCPaLxA7tRhhF0Kg1L7MZjHIE24os%2B05X%2B%2FL6ErWm7pQCd0ndJdxKN93cfNPDf763T5CwFzVTcK%2BnOXxrLXqE0pRXbtmmxAv3EaUp3%2Ftg4PQlL0x7TRIAZeXIusYnyfMo1p50apyU5mCOCcIV1rcJA2J9mivqzvpZYXXldR8pQWlQ77Y8CBnk8GFYLlcNBnJtNmwwlVlH%2Bl%2BYBG69Yn7Py98Ksty48lrQemXY2kEZRfvAMr5l84P97yOwaPgNfWZq2NpZG86JgPhlP%2B9ldlo9S3rP%2BdDyZB5FnRdqygzTHcRzHcRzHcRzHcRzHcZz%2FAbyvLkVmYcs9AAAAAElFTkSuQmCC&link=https%3A%2F%2Fwww.together.ai"" style=""display: inline-block; vertical-align: middle;""/>
|
| 450 |
+
</a>
|
| 451 |
+
</div>
|
| 452 |
+
</div>
|
| 453 |
+
</div>
|
| 454 |
+
|
| 455 |
+
## DeepCoder Overview
|
| 456 |
+
DeepCoder-14B-Preview is a code reasoning LLM fine-tuned from DeepSeek-R1-Distilled-Qwen-14B using distributed reinforcement learning (RL) to scale up to long context lengths. The model achieves 60.6% Pass@1 accuracy on LiveCodeBench v5 (8/1/24-2/1/25), representing an 8% improvement over the base model (53%) and achieving similar performance to OpenAI's o3-mini with just 14B parameters.
|
| 457 |
+
|
| 458 |
+
<div style=""margin: 0 auto;"">
|
| 459 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/654037be97949fd2304aab7f/r3-vzkItOCrMf1qldW0Mj.png"" style=""width: 100%;"" />
|
| 460 |
+
</div>
|
| 461 |
+
|
| 462 |
+
## Data
|
| 463 |
+
Our training dataset consists of approximately 24K unique problem-tests pairs compiled from:
|
| 464 |
+
- Taco-Verified
|
| 465 |
+
- PrimeIntellect SYNTHETIC-1
|
| 466 |
+
- LiveCodeBench v5 (5/1/23-7/31/24)
|
| 467 |
+
|
| 468 |
+
## Training Recipe
|
| 469 |
+
|
| 470 |
+
Our training recipe relies on an improved version of GRPO (GRPO+) and iterative context lengthening, introduced in DeepScaleR.
|
| 471 |
+
|
| 472 |
+
### GRPO+
|
| 473 |
+
|
| 474 |
+
We enhance the original GRPO algorithm with insights from DAPO to enable more stable training:
|
| 475 |
+
|
| 476 |
+
- **Offline Difficulty Filtering:** DAPO employs online dynamic sampling, discarding both entirely correct and entirely incorrect samples on the fly. While this helps maintain a more stable effective batch size, it introduces significant runtime overhead due to rejection sampling. Instead, we perform offline difficulty filtering on a subset of coding problems to ensure the training dataset remains within a suitable difficulty range.
|
| 477 |
+
- **No Entropy Loss:** We observed that including an entropy loss term often led to instability, with entropy growing exponentially and ultimately collapsing training. To mitigate this, we eliminate the entropy loss entirely.
|
| 478 |
+
- **No KL Loss:** Eliminating KL loss prevents the LLM from staying within trust region of the original SFT model. This removal also obviates the need to compute log probabilities for the reference policy, thereby accelerating training.
|
| 479 |
+
- **Overlong Filtering** **(from DAPO):** To preserve long-context reasoning, we mask the loss for truncated sequences. This technique enables DeepCoder to generalize to 64K-context inference despite being trained with a 32K context.
|
| 480 |
+
- **Clip High (from DAPO):** By increasing the upper bound in GRPO/PPO’s surrogate loss, we encourage more exploration and more stable entropy.
|
| 481 |
+
|
| 482 |
+
### Iterative Context Lengthening
|
| 483 |
+
|
| 484 |
+
Our original `Deepscaler-1.5B-Preview` scaled long context training from 8K→16K→24K, achieving 33→38→43% on AIME respectively. Similarly, `Deepcoder-14B-Preview` is trained on 16K→32K, achieving 54→58% on LiveCodeBench (v5). `DeepCoder-14B-Preview` successfully generalizes to longer contexts when evaluated at 64K context, reaching 60.6%.
|
| 485 |
+
|
| 486 |
+
DeepCoder generalizes better to long contexts than the base distilled model, due to DAPO's overlong filtering. However, its longer responses are often truncated when the max length is capped at 16K, which can lower its scores.
|
| 487 |
+
|
| 488 |
+
| **Model** | **16K** | **32K** | **64K** |
|
| 489 |
+
| --- | --- | --- | --- |
|
| 490 |
+
| **DeepCoder-14B-Preview** | 45.6 | 57.9 | 60.6 |
|
| 491 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 50.2 | 53.0 | 53.0 |
|
| 492 |
+
|
| 493 |
+
A more detailed description of the training recipe can be found in our [blog post](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51).
|
| 494 |
+
|
| 495 |
+
## Evaluation
|
| 496 |
+
|
| 497 |
+
We evaluate `Deepcoder-14B-Preview` on various coding benchmarks, including LiveCodeBench (LCBv5), Codeforces, and HumanEval+.
|
| 498 |
+
|
| 499 |
+
| **Model** | LCB (v5)(8/1/24-2/1/25) | Codeforces Rating | Codeforces Percentile | HumanEval+ |
|
| 500 |
+
| --- | --- | --- | --- | --- |
|
| 501 |
+
| **DeepCoder-14B-Preview (ours)** | ***60.6*** | ***1936*** | ***95.3*** | ***92.6*** |
|
| 502 |
+
| **DeepSeek-R1-Distill-Qwen-14B** | 53.0 | 1791 | 92.7 | 92.0 |
|
| 503 |
+
| **O1-2024-12-17 (Low)** | 59.5 | **1991** | **96.1** | 90.8 |
|
| 504 |
+
| **O3-Mini-2025-1-31 (Low)** | **60.9** | 1918 | 94.9 | 92.6 |
|
| 505 |
+
| **O1-Preview** | 42.7 | 1658 | 88.5 | 89 |
|
| 506 |
+
| **Deepseek-R1** | 62.8 | 1948 | 95.4 | 92.6 |
|
| 507 |
+
| **Llama-4-Behemoth** | 49.4 | - | - | - |
|
| 508 |
+
|
| 509 |
+
## Serving DeepCoder
|
| 510 |
+
Our model can be served using popular high-performance inference systems:
|
| 511 |
+
- vLLM
|
| 512 |
+
- Hugging Face Text Generation Inference (TGI)
|
| 513 |
+
- SGLang
|
| 514 |
+
- TensorRT-LLM
|
| 515 |
+
|
| 516 |
+
All these systems support the OpenAI Chat Completions API format.
|
| 517 |
+
|
| 518 |
+
## License
|
| 519 |
+
This project is released under the MIT License, reflecting our commitment to open and accessible AI development.
|
| 520 |
+
We believe in democratizing AI technology by making our work freely available for anyone to use, modify, and build upon.
|
| 521 |
+
This permissive license ensures that researchers, developers, and enthusiasts worldwide can leverage and extend our work without restrictions, fostering innovation and collaboration in the AI community.
|
| 522 |
+
|
| 523 |
+
## Acknowledgement
|
| 524 |
+
- Our training experiments are powered by our heavily modified fork of [Verl](https://github.com/agentica-project/verl), an open-source post-training library.
|
| 525 |
+
- Our model is trained on top of [`DeepSeek-R1-Distill-Qwen-14B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B).
|
| 526 |
+
- Our work is done as part of [Berkeley Sky Computing Lab](https://skycomputing.berkeley.edu/) and [Berkeley AI Research](https://bair.berkeley.edu/).
|
| 527 |
+
|
| 528 |
+
## Citation
|
| 529 |
+
```bibtex
|
| 530 |
+
@misc{deepcoder2025,
|
| 531 |
+
title={DeepCoder: A Fully Open-Source 14B Coder at O3-mini Level},
|
| 532 |
+
author={Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, Ion Stoica, Tianjun Zhang},
|
| 533 |
+
howpublished={\url{https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51}},
|
| 534 |
+
note={Notion Blog},
|
| 535 |
+
year={2025}
|
| 536 |
+
}
|
| 537 |
+
```","{""id"": ""Apel-sin/deepcoder-14B-preview-exl2"", ""author"": ""Apel-sin"", ""sha"": ""95b6fb26de354f6f87bc6d121ce6649bbe6bf405"", ""last_modified"": ""2025-04-16 12:52:30+00:00"", ""created_at"": ""2025-04-16 12:51:45+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""text-generation"", ""en"", ""dataset:PrimeIntellect/verifiable-coding-problems"", ""dataset:likaixin/TACO-verified"", ""dataset:livecodebench/code_generation_lite"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='measurement.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-16 12:52:30+00:00"", ""cardData"": ""base_model:\n- 
agentica-org/DeepCoder-14B-Preview\ndatasets:\n- PrimeIntellect/verifiable-coding-problems\n- likaixin/TACO-verified\n- livecodebench/code_generation_lite\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67ffa7e1f71379c75f70b5da"", ""modelId"": ""Apel-sin/deepcoder-14B-preview-exl2"", ""usedStorage"": 15550291930}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Apel-sin/deepcoder-14B-preview-exl2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BApel-sin%2Fdeepcoder-14B-preview-exl2%5D(%2FApel-sin%2Fdeepcoder-14B-preview-exl2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 538 |
+
wasim845/dfgh,"---
|
| 539 |
+
language:
|
| 540 |
+
- af
|
| 541 |
+
metrics:
|
| 542 |
+
- cer
|
| 543 |
+
base_model:
|
| 544 |
+
- agentica-org/DeepCoder-14B-Preview
|
| 545 |
+
---","{""id"": ""wasim845/dfgh"", ""author"": ""wasim845"", ""sha"": ""71e0c07e2c4e331839754d53d274984eb53945a8"", ""last_modified"": ""2025-04-21 18:38:51+00:00"", ""created_at"": ""2025-04-21 18:38:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""af"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlanguage:\n- af\nmetrics:\n- cer"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-21 18:38:51+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlanguage:\n- af\nmetrics:\n- cer"", ""transformersInfo"": null, ""_id"": ""680690a771c70b05daf501cf"", ""modelId"": ""wasim845/dfgh"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=wasim845/dfgh&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwasim845%2Fdfgh%5D(%2Fwasim845%2Fdfgh)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 546 |
+
rieon/DeepCoder-14B-Preview-Suger,"---
|
| 547 |
+
license: apache-2.0
|
| 548 |
+
pipeline_tag: text-generation
|
| 549 |
+
base_model:
|
| 550 |
+
- agentica-org/DeepCoder-14B-Preview
|
| 551 |
+
---","{""id"": ""rieon/DeepCoder-14B-Preview-Suger"", ""author"": ""rieon"", ""sha"": ""dec3e9f412ae94cd14d1cbfc88a289d7b2a88593"", ""last_modified"": ""2025-04-24 23:08:31+00:00"", ""created_at"": ""2025-04-24 21:43:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""base_model:agentica-org/DeepCoder-14B-Preview"", ""base_model:finetune:agentica-org/DeepCoder-14B-Preview"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: apache-2.0\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not 
ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": 
false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-24 23:08:31+00:00"", ""cardData"": ""base_model:\n- agentica-org/DeepCoder-14B-Preview\nlicense: apache-2.0\npipeline_tag: text-generation"", ""transformersInfo"": null, ""_id"": ""680ab07645884469f943b21d"", ""modelId"": ""rieon/DeepCoder-14B-Preview-Suger"", ""usedStorage"": 
29551556738}",1,,0,,0,https://huggingface.co/mradermacher/DeepCoder-14B-Preview-Suger-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=rieon/DeepCoder-14B-Preview-Suger&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brieon%2FDeepCoder-14B-Preview-Suger%5D(%2Frieon%2FDeepCoder-14B-Preview-Suger)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
DeepSeek-R1-Distill-Qwen-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
DeepSeek-V3-0324_finetunes_20250424_145241.csv_finetunes_20250424_145241.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Fimbulvetr-11B-v2_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Sao10K/Fimbulvetr-11B-v2,"---
|
| 3 |
+
license: cc-by-nc-4.0
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+

|
| 9 |
+
|
| 10 |
+
*Cute girl to catch your attention.*
|
| 11 |
+
|
| 12 |
+
**https://huggingface.co/Sao10K/Fimbulvetr-11B-v2-GGUF <------ GGUF**
|
| 13 |
+
|
| 14 |
+
Fimbulvetr-v2 - A Solar-Based Model
|
| 15 |
+
|
| 16 |
+
***
|
| 17 |
+
|
| 18 |
+
4/4 Status Update:
|
| 19 |
+
|
| 20 |
+
got a few reqs on wanting to support me: https://ko-fi.com/sao10k
|
| 21 |
+
|
| 22 |
+
anyway, status on v3 - Halted for time being, working on dataset work mainly. it's a pain, to be honest.
|
| 23 |
+
the data I have isn't up to my standard for now. it's good, just not good enough
|
| 24 |
+
|
| 25 |
+
***
|
| 26 |
+
|
| 27 |
+
Prompt Formats - Alpaca or Vicuna. Either one works fine.
|
| 28 |
+
Recommended SillyTavern Presets - Universal Light
|
| 29 |
+
|
| 30 |
+
Alpaca:
|
| 31 |
+
```
|
| 32 |
+
### Instruction:
|
| 33 |
+
<Prompt>
|
| 34 |
+
### Input:
|
| 35 |
+
<Insert Context Here>
|
| 36 |
+
### Response:
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
Vicuna:
|
| 40 |
+
```
|
| 41 |
+
System: <Prompt>
|
| 42 |
+
|
| 43 |
+
User: <Input>
|
| 44 |
+
|
| 45 |
+
Assistant:
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
****
|
| 50 |
+
|
| 51 |
+
Changelogs:
|
| 52 |
+
|
| 53 |
+
25/2 - repo renamed to remove test, model card redone. Model's officially out.
|
| 54 |
+
<br>15/2 - Heavy testing complete. Good feedback.
|
| 55 |
+
|
| 56 |
+
***
|
| 57 |
+
|
| 58 |
+
<details><summary>Rant - Kept For Historical Reasons</summary>
|
| 59 |
+
|
| 60 |
+
Ramble to meet minimum length requirements:
|
| 61 |
+
|
| 62 |
+
Tbh i wonder if this shit is even worth doing. Like im just some broke guy lmao I've spent so much. And for what? I guess creds. Feels good when a model gets good feedback, but it seems like im invisible sometimes. I should be probably advertising myself and my models on other places but I rarely have the time to. Probably just internal jealousy sparking up here and now. Wahtever I guess.
|
| 63 |
+
|
| 64 |
+
Anyway cool EMT vocation I'm doing is cool except it pays peanuts, damn bruh 1.1k per month lmao. Government to broke to pay for shit. Pays the bills I suppose.
|
| 65 |
+
|
| 66 |
+
Anyway cool beans, I'm either going to continue the Solar Train or go to Mixtral / Yi when I get paid.
|
| 67 |
+
|
| 68 |
+
You still here?
|
| 69 |
+
</details><br>
|
| 70 |
+
","{""id"": ""Sao10K/Fimbulvetr-11B-v2"", ""author"": ""Sao10K"", ""sha"": ""b2dcd534dc3a53ff84e60a53b87816185169be19"", ""last_modified"": ""2024-04-04 00:16:55+00:00"", ""created_at"": ""2024-02-06 14:29:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 154, ""downloads_all_time"": null, ""likes"": 179, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""en"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: cc-by-nc-4.0"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cute1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""KBaba7/Quant"", ""Darok/Featherless-Feud"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""emekaboris/try-this-model"", ""ruslanmv/convert_to_gguf"", ""SC999/NV_Nemotron"", ""JackHoltone/try-this-model"", ""k11112/try-this-model"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F16"": 10731524096}, ""total"": 10731524096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-04 00:16:55+00:00"", ""cardData"": ""language:\n- en\nlicense: cc-by-nc-4.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65c24249c9061603c6400713"", ""modelId"": ""Sao10K/Fimbulvetr-11B-v2"", ""usedStorage"": 21463591819}",0,"https://huggingface.co/KaraKaraWarehouse/UnFimbulvetr-20B, https://huggingface.co/KaraKaraWarehouse/UnFimbulvetr-20B-V2, https://huggingface.co/matchaaaaa/Chaifighter-20B, https://huggingface.co/matchaaaaa/Chaifighter-20B-v2, https://huggingface.co/patruff/chucklesDPOadapterB, 
https://huggingface.co/patruff/probablyBadFineTune11B, https://huggingface.co/matchaaaaa/Chaifighter-20B-v2.1, https://huggingface.co/ClaudioItaly/Fimbulvetr-Cognitive-11B-V3, https://huggingface.co/ClaudioItaly/Fimbulvetr-40, https://huggingface.co/mlx-community/Fimbulvetr-11B-v2, https://huggingface.co/TheHierophant/Fimbulvetr-11B-Attention-V0.1-test",11,,0,"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-i1-GGUF, https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF, https://huggingface.co/backyardai/Fimbulvetr-11B-v2-GGUF, https://huggingface.co/Lewdiculous/Fimbulvetr-11B-v2-GGUF-IQ-Imatrix, https://huggingface.co/darkc0de/UnderbossUncensored-GGUF, https://huggingface.co/Clevyby/Fimbulvetr-11B-v2-Q5_K_S-GGUF, https://huggingface.co/faerdhinen/Fimbulvetr-11B-v2-Q4_K_M-GGUF, https://huggingface.co/ClaudioItaly/Fimbulvetr-11B-Imatrix, https://huggingface.co/jtjones/Fimbulvetr-11B-v2-Q6_K-GGUF, https://huggingface.co/mlx-community/Fimbulvetr-11B-v2-8bit, https://huggingface.co/featherless-ai-quants/Sao10K-Fimbulvetr-11B-v2-GGUF, https://huggingface.co/tensorblock/Fimbulvetr-11B-v2-GGUF, https://huggingface.co/noesis-ai/Fimbulvetr-11B-v2-GGUF",13,"https://huggingface.co/backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B, https://huggingface.co/FallenMerick/Chewy-Lemon-Cookie-11B, https://huggingface.co/SteelStorage/Umbra-v3-MoE-4x11b, https://huggingface.co/AzureBlack/Umbra-v3-MoE-4x11b-exl2-8bpw-8h-rpcal, https://huggingface.co/grenforcer/experiment, https://huggingface.co/weezywitasneezy/OxytocinEngineering_v0-10B-passthrough, https://huggingface.co/weezywitasneezy/OxytocinEngineering_v0.1-14B-passthrough, https://huggingface.co/PJMixers-Archive/Fimbulvetr-Holodeck-Erebus-10.7B-GGUF, https://huggingface.co/Rama-adi/test-merge, https://huggingface.co/PJMixers-Archive/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B-GGUF, https://huggingface.co/PJMixers-Archive/FimbulWestHolobuSiliKunoichiBeagle-10.7B-GGUF, 
https://huggingface.co/steinzer-narayan/fimbulhermes-15B-v0.1, https://huggingface.co/steinzer-narayan/fimbulhermes-15B-v0.1_exl2_6.5bpw, https://huggingface.co/PJMixers-Archive/EmptiSolar-4-10.7B-GGUF, https://huggingface.co/weezywitasneezy/OxytocinEngineering-45B-passthrough, https://huggingface.co/weezywitasneezy/OxytocinEngineering-v2-45B-passthrough, https://huggingface.co/SteelStorage/Umbra-v3-MoE-4x11b-2ex, https://huggingface.co/mergekit-community/mergekit-slerp-oskyrzi, https://huggingface.co/Nanner-kins/Umbra-v3-MoE-4x11b-3.5bpw-h6-exl2, https://huggingface.co/matchaaaaa/Fimbul-Airo-18B, https://huggingface.co/mergekit-community/Synaptica, https://huggingface.co/mergekit-community/YorkShire11, https://huggingface.co/FallenMerick/Chunky-Lemon-Cookie-11B, https://huggingface.co/mergekit-community/Fimburs11V3, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-2_2bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-2_5bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-3_0bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-3_5bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-3_75bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-4_0bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-4_25bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-6_0bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-5_0bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-6_5bpw_exl2, https://huggingface.co/Zoyd/FallenMerick_Chunky-Lemon-Cookie-11B-8_0bpw_exl2, https://huggingface.co/ClaudioItaly/Underground, https://huggingface.co/v000000/SyntheticMoist-11B-v2, https://huggingface.co/matchaaaaa/Chaifighter-20B-v3, https://huggingface.co/ClaudioItaly/Top11Evolution, https://huggingface.co/ClaudioItaly/EvolutionFimbMoister-11, 
https://huggingface.co/ClaudioItaly/Eutopia, https://huggingface.co/ClaudioItaly/FimbMoister-11Range, https://huggingface.co/ClaudioItaly/Sharp11, https://huggingface.co/QuantFactory/Chewy-Lemon-Cookie-11B-GGUF, https://huggingface.co/darkc0de/CrystalDeathUncensored, https://huggingface.co/mergekit-community/mergekit-model_stock-czgudbn",46,"Darok/Featherless-Feud, FallnAI/Quantize-HF-Models, JackHoltone/try-this-model, K00B404/LLM_Quantization, KBaba7/Quant, SC999/NV_Nemotron, bhaskartripathi/LLM_Quantization, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=Sao10K/Fimbulvetr-11B-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSao10K%2FFimbulvetr-11B-v2%5D(%2FSao10K%2FFimbulvetr-11B-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, k11112/try-this-model, ruslanmv/convert_to_gguf, totolook/Quant",13
|
| 71 |
+
KaraKaraWarehouse/UnFimbulvetr-20B,"---
|
| 72 |
+
base_model: [""Sao10K/Fimbulvetr-11B-v2""]
|
| 73 |
+
library_name: transformers
|
| 74 |
+
tags:
|
| 75 |
+
- mergekit
|
| 76 |
+
- merge
|
| 77 |
+
|
| 78 |
+
---
|
| 79 |
+
# UnFimbulvetr-20B
|
| 80 |
+
|
| 81 |
+

|
| 82 |
+
|
| 83 |
+
*Waifu to catch your attention*
|
| 84 |
+
|
| 85 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 86 |
+
|
| 87 |
+
NOTE: *Only tested this just for a bit. YMMV.*
|
| 88 |
+
|
| 89 |
+
## Next Day Tests...
|
| 90 |
+
|
| 91 |
+
Downloaded the GGUF model that someone quantized... And... nope. No.
|
| 92 |
+
|
| 93 |
+
**Do not use model.**
|
| 94 |
+
|
| 95 |
+
## Merge Details
|
| 96 |
+
### Merge Method
|
| 97 |
+
|
| 98 |
+
This model was merged using the passthrough merge method.
|
| 99 |
+
|
| 100 |
+
### Models Merged
|
| 101 |
+
|
| 102 |
+
The following models were included in the merge:
|
| 103 |
+
* Sao10K/Fimbulvetr-11B-v2
|
| 104 |
+
|
| 105 |
+
### Configuration
|
| 106 |
+
|
| 107 |
+
The following YAML configuration was used to produce this model:
|
| 108 |
+
|
| 109 |
+
```yaml
|
| 110 |
+
slices:
|
| 111 |
+
- sources:
|
| 112 |
+
- model: FimbMagic
|
| 113 |
+
layer_range: [0, 13]
|
| 114 |
+
- sources:
|
| 115 |
+
- model: FimbMagic
|
| 116 |
+
layer_range: [8, 13]
|
| 117 |
+
- sources:
|
| 118 |
+
- model: FimbMagic
|
| 119 |
+
layer_range: [12, 36]
|
| 120 |
+
- sources:
|
| 121 |
+
- model: FimbMagic
|
| 122 |
+
layer_range: [12, 36]
|
| 123 |
+
- sources:
|
| 124 |
+
- model: FimbMagic
|
| 125 |
+
layer_range: [36, 48]
|
| 126 |
+
- sources:
|
| 127 |
+
- model: FimbMagic
|
| 128 |
+
layer_range: [36, 48]
|
| 129 |
+
merge_method: passthrough
|
| 130 |
+
dtype: bfloat16
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
### Additional Notes
|
| 134 |
+
|
| 135 |
+
Fimbulvetr 11B is still a very good model. This model is for extreme trailblazers who wants to test stuff!
|
| 136 |
+
|
| 137 |
+
Eval results? Don't bother.
|
| 138 |
+
|
| 139 |
+
Last one before I sleep: *I'm so sorry Sao10K...*","{""id"": ""KaraKaraWarehouse/UnFimbulvetr-20B"", ""author"": ""KaraKaraWarehouse"", ""sha"": ""cbea6268b5a2b123075409a712f0574d5f29c1ab"", ""last_modified"": ""2024-05-08 15:16:37+00:00"", ""created_at"": ""2024-05-07 16:06:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='UnFimbulator.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 19892228096}, ""total"": 19892228096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-08 15:16:37+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""663a519a13f627b816e8d973"", ""modelId"": ""KaraKaraWarehouse/UnFimbulvetr-20B"", ""usedStorage"": 
91435743075}",1,,0,,0,https://huggingface.co/mradermacher/UnFimbulvetr-20B-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=KaraKaraWarehouse/UnFimbulvetr-20B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKaraKaraWarehouse%2FUnFimbulvetr-20B%5D(%2FKaraKaraWarehouse%2FUnFimbulvetr-20B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 140 |
+
KaraKaraWarehouse/UnFimbulvetr-20B-V2,"---
|
| 141 |
+
base_model: [""Sao10K/Fimbulvetr-11B-v2""]
|
| 142 |
+
library_name: transformers
|
| 143 |
+
tags:
|
| 144 |
+
- mergekit
|
| 145 |
+
- merge
|
| 146 |
+
|
| 147 |
+
---
|
| 148 |
+
# UnFimbulvetr-20B-V2
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
|
| 152 |
+
*Waifu to catch your attention*
|
| 153 |
+
|
| 154 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 155 |
+
|
| 156 |
+
## GGUF Tests
|
| 157 |
+
|
| 158 |
+
Seems usable. But I'm having difficulty telling if the stacked layers were beneficial. While I think there's something added, I really can't tell.
|
| 159 |
+
|
| 160 |
+
## Merge Details
|
| 161 |
+
### Merge Method
|
| 162 |
+
|
| 163 |
+
This model was merged using the passthrough merge method.
|
| 164 |
+
|
| 165 |
+
### Models Merged
|
| 166 |
+
|
| 167 |
+
The following models were included in the merge:
|
| 168 |
+
* [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2)
|
| 169 |
+
|
| 170 |
+
### Configuration
|
| 171 |
+
|
| 172 |
+
The following YAML configuration was used to produce this model:
|
| 173 |
+
|
| 174 |
+
```yaml
|
| 175 |
+
slices:
|
| 176 |
+
- sources:
|
| 177 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 178 |
+
layer_range: [0, 13]
|
| 179 |
+
- sources:
|
| 180 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 181 |
+
layer_range: [8, 13]
|
| 182 |
+
- sources:
|
| 183 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 184 |
+
layer_range: [12, 36]
|
| 185 |
+
- sources:
|
| 186 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 187 |
+
layer_range: [12, 36]
|
| 188 |
+
- sources:
|
| 189 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 190 |
+
layer_range: [20, 36]
|
| 191 |
+
- sources:
|
| 192 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 193 |
+
layer_range: [36, 48]
|
| 194 |
+
merge_method: passthrough
|
| 195 |
+
dtype: bfloat16
|
| 196 |
+
|
| 197 |
+
```
|
| 198 |
+
","{""id"": ""KaraKaraWarehouse/UnFimbulvetr-20B-V2"", ""author"": ""KaraKaraWarehouse"", ""sha"": ""d6e6549a7a9d8da10ee166ab4607838a1e3555d2"", ""last_modified"": ""2024-05-16 03:53:00+00:00"", ""created_at"": ""2024-05-15 09:27:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13, ""downloads_all_time"": null, ""likes"": 3, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='UnFimbulator.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 20764676096}, ""total"": 20764676096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-16 03:53:00+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6644800c7a482e37da9ebd30"", ""modelId"": ""KaraKaraWarehouse/UnFimbulvetr-20B-V2"", ""usedStorage"": 
41529944571}",1,,0,,0,"https://huggingface.co/mradermacher/UnFimbulvetr-20B-V2-GGUF, https://huggingface.co/mradermacher/UnFimbulvetr-20B-V2-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=KaraKaraWarehouse/UnFimbulvetr-20B-V2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKaraKaraWarehouse%2FUnFimbulvetr-20B-V2%5D(%2FKaraKaraWarehouse%2FUnFimbulvetr-20B-V2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 199 |
+
matchaaaaa/Chaifighter-20B,"---
|
| 200 |
+
base_model:
|
| 201 |
+
- Sao10K/Fimbulvetr-11B-v2
|
| 202 |
+
library_name: transformers
|
| 203 |
+
tags:
|
| 204 |
+
- mergekit
|
| 205 |
+
- merge
|
| 206 |
+
license: cc-by-4.0
|
| 207 |
+
language:
|
| 208 |
+
- en
|
| 209 |
+
---
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
|
| 213 |
+
[**V2 IS OUT!!!**](https://huggingface.co/matchaaaaa/Chaifighter-20b-v2)
|
| 214 |
+
|
| 215 |
+
**GGUFs available [here](https://huggingface.co/FaradayDotDev/Chaifighter-20B-GGUF)!** Thanks to [@brooketh](https://huggingface.co/brooketh) for providing the quantized models!
|
| 216 |
+
|
| 217 |
+
# Chaifighter 20B
|
| 218 |
+
|
| 219 |
+
Meet Chaifighter 20B. This is my shot at making [Fimbulvetr 11B v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2) a bit more creative and verbose while retaining its incredible coherence and intelligence. It also shows that SOLAR-based models and Mistral-based models can be merged, as SOLAR 10.7B was based on a Mistral 7B frankenmerge and finetuned a bit.
|
| 220 |
+
|
| 221 |
+
I also wanted to provide an alternative to [Psyonic Cetacean 20B](https://huggingface.co/jebcarter/psyonic-cetacean-20B), which is a fantastic model that you should check out if you haven't already! The issue with that model is that it's based on Llama 2, which is outdated now. The older architecture lacked many performance enhancements that were introduced by the Mistral architecture, and on my 16 GB RTX 4060 Ti, those performance enhancements were the difference between decently speedy and intolerably sluggish. I wanted to help cater towards those who can run a more than a 13B but not a 34B, so this is a good middle ground.
|
| 222 |
+
|
| 223 |
+
Chaifighter 20B is geared towards long-form roleplay chats rather than short-form IRC/Discord RP chats. It loves verbosity and detail, and its quality will depend on how much ""ammunition"" you can give it. While it sorta-kinda can do short-form with some swiping, it isn't really ideal. But for those essay-writing powerhouses that love typing up a storm in the character card, this one's for you.
|
| 224 |
+
|
| 225 |
+
Chaifighter 20B natively supports a context window of only 4096 tokens maximum. I tried RoPE scaling but it was not happy from the limited testing I did. Your mileage may vary, and if anyone can manage to get it working higher, I'd love to hear about it!
|
| 226 |
+
|
| 227 |
+
Stay tuned for V2! Feedback is welcomed and appreciated!!
|
| 228 |
+
|
| 229 |
+
## Recommended Parameters:
|
| 230 |
+
|
| 231 |
+
* Temperature: **1.0 - 1.25**
|
| 232 |
+
* Min-P: **0.1**
|
| 233 |
+
* Repetition Penalty: **1.05-1.1**
|
| 234 |
+
* *All other samplers disabled*
|
| 235 |
+
|
| 236 |
+
Or, alternatively, use **Universal Light** in SillyTavern!
|
| 237 |
+
|
| 238 |
+
## Prompt Template: Alpaca
|
| 239 |
+
|
| 240 |
+
```
|
| 241 |
+
Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
| 242 |
+
|
| 243 |
+
### Instruction:
|
| 244 |
+
{prompt}
|
| 245 |
+
|
| 246 |
+
### Response:
|
| 247 |
+
```
|
| 248 |
+
|
| 249 |
+
## Mergekit
|
| 250 |
+
|
| 251 |
+
Chaifighter 20B is a frankenmerge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 252 |
+
|
| 253 |
+
## Merge Details
|
| 254 |
+
### Merge Method
|
| 255 |
+
|
| 256 |
+
This model was merged using the passthrough merge method.
|
| 257 |
+
|
| 258 |
+
### Models Merged
|
| 259 |
+
|
| 260 |
+
The following models were included in the merge:
|
| 261 |
+
* [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2)
|
| 262 |
+
* [SanjiWatsuki/Kunoichi-7B](https://huggingface.co/SanjiWatsuki/Kunoichi-7B)
|
| 263 |
+
* [Gryphe/MythoMist-7b](https://huggingface.co/Gryphe/MythoMist-7b)
|
| 264 |
+
* [Undi95/Toppy-M-7B](https://huggingface.co/Undi95/Toppy-M-7B)
|
| 265 |
+
|
| 266 |
+
### The Sauceeeeee
|
| 267 |
+
|
| 268 |
+
```yaml
|
| 269 |
+
slices:
|
| 270 |
+
- sources:
|
| 271 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 272 |
+
layer_range: [0, 40] # all but last 8 layers
|
| 273 |
+
- sources:
|
| 274 |
+
- model: SanjiWatsuki/Kunoichi-7B
|
| 275 |
+
layer_range: [0, 24] # all but last 8 layers
|
| 276 |
+
- sources:
|
| 277 |
+
- model: Undi95/Toppy-M-7B
|
| 278 |
+
layer_range: [16, 24] # 16 layers of Toppy and MythoMist split and interleaved to (in theory) boost the model's coherence
|
| 279 |
+
- sources:
|
| 280 |
+
- model: Gryphe/MythoMist-7b
|
| 281 |
+
layer_range: [16, 24]
|
| 282 |
+
- sources:
|
| 283 |
+
- model: Undi95/Toppy-M-7B
|
| 284 |
+
layer_range: [25, 32]
|
| 285 |
+
- sources:
|
| 286 |
+
- model: Gryphe/MythoMist-7b
|
| 287 |
+
layer_range: [25, 32]
|
| 288 |
+
merge_method: passthrough
|
| 289 |
+
dtype: bfloat16
|
| 290 |
+
```
|
| 291 |
+
Yeah, it's mad sussy. I know what I did, but I'm not sorry.
|
| 292 |
+
|
| 293 |
+
## Other stuff
|
| 294 |
+
|
| 295 |
+
Okay! Fine! It's not really a 20B, it's a 21B, but I did everything planning for a 20B before deciding to add 4 more layers to the model to make it more stable. It made a big difference.
|
| 296 |
+
|
| 297 |
+
Yapping time. As far as the name is concerned, I'm going for a tea/coffee/hot drink motif for my models, and one of the names I was debating on using for this model was Chai-Latte. As I worked on this merge, I got the idea of naming it ""Chaifighter"" as a play on ""Psyfighter2"", one of the models making up Psyonic Cetacean and also a play on a model called ""Tiefighter"" from which it was derived. Both are fantastic models, especially given their age. They're both worth checking out too if you haven't done so. ""Chai"" itself is a play on a certain AI chatting website (CAI) that got me into this lovely mess in the first place. So I guess it's fitting to name the first model of the series after it.
|
| 298 |
+
|
| 299 |
+
And lastly, of course, thank you for checking out my model! Have a great day and please take care of yourself, alright? :)","{""id"": ""matchaaaaa/Chaifighter-20B"", ""author"": ""matchaaaaa"", ""sha"": ""7746b6dc78fd135559b8b5a1330cb18f0abb2cab"", ""last_modified"": ""2024-05-19 06:47:52+00:00"", ""created_at"": ""2024-05-15 22:08:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 67, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""en"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""license:cc-by-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-4.0\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chaifighter-cute.png', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00020-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00040-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00044.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 20764676096}, ""total"": 20764676096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-19 06:47:52+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-4.0\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""664532543a75d60bfa6f8393"", ""modelId"": ""matchaaaaa/Chaifighter-20B"", ""usedStorage"": 41529944387}",1,,0,,0,"https://huggingface.co/mradermacher/Chaifighter-20B-GGUF, 
https://huggingface.co/mradermacher/Chaifighter-20B-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=matchaaaaa/Chaifighter-20B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmatchaaaaa%2FChaifighter-20B%5D(%2Fmatchaaaaa%2FChaifighter-20B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 300 |
+
matchaaaaa/Chaifighter-20B-v2,"---
|
| 301 |
+
base_model:
|
| 302 |
+
- Sao10K/Fimbulvetr-11B-v2
|
| 303 |
+
library_name: transformers
|
| 304 |
+
tags:
|
| 305 |
+
- mergekit
|
| 306 |
+
- merge
|
| 307 |
+
license: apache-2.0
|
| 308 |
+
pipeline_tag: text-generation
|
| 309 |
+
---
|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
|
| 313 |
+
**Thank you @brooketh for the [iMat + static GGUFs](https://huggingface.co/FaradayDotDev/Chaifighter-20B-v2-GGUF) on the Faraday model hub!**
|
| 314 |
+
|
| 315 |
+
**Thank you @mradermacher for also making [GGUFs](https://huggingface.co/mradermacher/Chaifighter-20B-v2-GGUF) and [iMat GGUFs](https://huggingface.co/mradermacher/Chaifighter-20B-v2-i1-GGUF)**
|
| 316 |
+
|
| 317 |
+
# Chaifighter 20B v2 (aaaaand it's BASICALLY a 20B this time!)
|
| 318 |
+
|
| 319 |
+
Meet Chaifighter 20B v2, my flagship Mistral 20B frankenmerge model! Boasting creativity, coherence, and cognitive thinking, this model is a great pick for those awkwardly stuck between 13B's and 34B's.
|
| 320 |
+
|
| 321 |
+
I also wanted to provide an alternative to Jeb Carter's [Psyonic Cetacean 20B](https://huggingface.co/jebcarter/psyonic-cetacean-20B), which is a fantastic model that you should check out if you haven't already! The issue with that model is that it's based on Llama 2, which is outdated now. The older architecture lacked many performance enhancements that were introduced by the Mistral architecture, and on my 16 GB RTX 4060 Ti, those performance enhancements were the difference between decently speedy and intolerably sluggish.
|
| 322 |
+
|
| 323 |
+
Chaifighter 20B is geared towards long-form roleplay chats rather than short-form IRC/Discord RP chats. It loves verbosity and detail, and its quality will depend on how much ""ammunition"" you can give it. While it sorta-kinda can do short-form with some swiping, it isn't really ideal. But for those essay-writing powerhouses that love typing up a storm in the character card, this one's for you.
|
| 324 |
+
|
| 325 |
+
Chaifighter 20B natively supports a context window of only 4096 tokens maximum.
|
| 326 |
+
|
| 327 |
+
## Prompt Template: Alpaca
|
| 328 |
+
|
| 329 |
+
```
|
| 330 |
+
Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
| 331 |
+
|
| 332 |
+
### Instruction:
|
| 333 |
+
{prompt}
|
| 334 |
+
|
| 335 |
+
### Response:
|
| 336 |
+
```
|
| 337 |
+
|
| 338 |
+
## Recommended Settings: Universal-Light
|
| 339 |
+
|
| 340 |
+
Here are some settings ranges that tend to work for me. They aren't strict values, and there's a bit of leeway in them. Feel free to experiment a bit!
|
| 341 |
+
|
| 342 |
+
* Temperature: **1.0** *to* **1.25** (adjust to taste, but keep it low. Chaifighter is creative enough on its own)
|
| 343 |
+
* Min-P: **0.1** (increasing might help if it goes cuckoo, but I suggest keeping it there)
|
| 344 |
+
* Repetition Penalty: **1.05** *to* **1.1** (high values aren't needed and usually degrade output)
|
| 345 |
+
* Rep. Penalty Range: **256** *or* **512**
|
| 346 |
+
* *(all other samplers disabled)*
|
| 347 |
+
|
| 348 |
+
## Merge Details
|
| 349 |
+
|
| 350 |
+
### Mergekit
|
| 351 |
+
|
| 352 |
+
Chaifighter 20B is a frankenmerge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 353 |
+
|
| 354 |
+
### Merge Method
|
| 355 |
+
|
| 356 |
+
This model was merged using the passthrough merge method.
|
| 357 |
+
|
| 358 |
+
### Models Merged
|
| 359 |
+
|
| 360 |
+
The following models were included in the merge:
|
| 361 |
+
* [Gryphe/MythoMist-7b](https://huggingface.co/Gryphe/MythoMist-7b)
|
| 362 |
+
* [KatyTheCutie/LemonadeRP-4.5.3](https://huggingface.co/KatyTheCutie/LemonadeRP-4.5.3)
|
| 363 |
+
* [SanjiWatsuki/Kunoichi-7B](https://huggingface.co/SanjiWatsuki/Kunoichi-7B)
|
| 364 |
+
* [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2)
|
| 365 |
+
|
| 366 |
+
### The Sauceeeeeee e ee
|
| 367 |
+
|
| 368 |
+
The following YAML configuration was used to produce this model:
|
| 369 |
+
|
| 370 |
+
```yaml
|
| 371 |
+
slices:
|
| 372 |
+
- sources:
|
| 373 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 374 |
+
layer_range: [0, 40]
|
| 375 |
+
- sources:
|
| 376 |
+
- model: SanjiWatsuki/Kunoichi-7B
|
| 377 |
+
layer_range: [8, 16]
|
| 378 |
+
- sources:
|
| 379 |
+
- model: Mytho-Lemon-11B # my own merge (see below).
|
| 380 |
+
layer_range: [8, 48]
|
| 381 |
+
merge_method: passthrough
|
| 382 |
+
dtype: bfloat16
|
| 383 |
+
```
|
| 384 |
+
|
| 385 |
+
And here's Mytho-Lemon-11B. Yep, named it backwards.
|
| 386 |
+
|
| 387 |
+
```yaml
|
| 388 |
+
slices:
|
| 389 |
+
- sources:
|
| 390 |
+
- model: KatyTheCutie/LemonadeRP-4.5.3
|
| 391 |
+
layer_range: [0, 24]
|
| 392 |
+
- sources:
|
| 393 |
+
- model: Gryphe/MythoMist-7B # manually added tokenizer files
|
| 394 |
+
layer_range: [8, 32]
|
| 395 |
+
merge_method: passthrough
|
| 396 |
+
dtype: bfloat16
|
| 397 |
+
```
|
| 398 |
+
|
| 399 |
+
It's a lot better than v1 :skull:
|
| 400 |
+
|
| 401 |
+
So, the idea was to start with Fimbulvetr-11B-v2, a super solid RP model that punches wayyy above its weight especially for its coherence, reasoning, and even spatial awareness. Keeping the layers intact apparently is somewhat unusual, but I wanted to keep it closest to the input layers. I thought it would improve logic and open the door for more creativity later in the stack. I added Kunoichi next for its context and instruction following skills. This worked very well in v1. Lastly, I used a frankenmerge of MythoMist and LemonadeRP for the last layers. These are pretty creative models with solid writing. MythoMist in theory would give the model flavor and verbosity. LemonadeRP was recommended by a friend, and I thought it really complemented the rest of the mix quite nicely!
|
| 402 |
+
|
| 403 |
+
## Thanks and Other Stuff
|
| 404 |
+
|
| 405 |
+
I want to thank everyone who helped me make this model. [@brooketh](https://huggingface.co/brooketh), [@FallenMerick](https://huggingface.co/FallenMerick), [@jebcarter](https://huggingface.co/jebcarter), [@Qonsol](https://huggingface.co/Qonsol), [@PacmanIncarnate](https://huggingface.co/PacmanIncarnate), and many others: thank you so much. Without the help, feedback, and encouragement these people gave, Chaifighter v2 would not have happened. The flaws in v1 were numerous and tricky to solve, especially for someone still super new to this (me). I don't know what I'd do without these kindhearted and generous people!
|
| 406 |
+
|
| 407 |
+
Yapping time. As far as the name is concerned, I'm going for a tea/coffee/hot drink motif for my models, and one of the names I was debating on using for this model was Chai-Latte. As I worked on this merge, I got the idea of naming it ""Chaifighter"" as a play on ""Psyfighter2"", one of the models making up Psyonic Cetacean and also a play on a model called ""Tiefighter"" from which it was derived. Both are fantastic models, especially given their age. They're both worth checking out too if you haven't done so. ""Chai"" itself is a play on a certain AI chatting website (CAI) that got me into this lovely mess in the first place. So I guess it's fitting to name the first model of the series after it.
|
| 408 |
+
|
| 409 |
+
And lastly, of course, thank you for checking out my model! Remember that you're super amazing, and have a fantastic day! :)","{""id"": ""matchaaaaa/Chaifighter-20B-v2"", ""author"": ""matchaaaaa"", ""sha"": ""dcdc4aa7bcce0baf5cc6e20bddc75edc3100f312"", ""last_modified"": ""2024-05-20 06:00:09+00:00"", ""created_at"": ""2024-05-19 04:24:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 20, ""downloads_all_time"": null, ""likes"": 7, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='chaifighter-cute.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 19456004096}, ""total"": 19456004096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-20 06:00:09+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, 
""_id"": ""66497f135fdb710820dd481f"", ""modelId"": ""matchaaaaa/Chaifighter-20B-v2"", ""usedStorage"": 38912594259}",1,,0,,0,"https://huggingface.co/backyardai/Chaifighter-20B-v2-GGUF, https://huggingface.co/mradermacher/Chaifighter-20B-v2-GGUF, https://huggingface.co/mradermacher/Chaifighter-20B-v2-i1-GGUF",3,https://huggingface.co/MadameMoonflower/CitrusTea-Test,1,huggingface/InferenceSupport/discussions/new?title=matchaaaaa/Chaifighter-20B-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmatchaaaaa%2FChaifighter-20B-v2%5D(%2Fmatchaaaaa%2FChaifighter-20B-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 410 |
+
patruff/chucklesDPOadapterB,"---
|
| 411 |
+
language:
|
| 412 |
+
- en
|
| 413 |
+
license: apache-2.0
|
| 414 |
+
tags:
|
| 415 |
+
- text-generation-inference
|
| 416 |
+
- transformers
|
| 417 |
+
- unsloth
|
| 418 |
+
- llama
|
| 419 |
+
- trl
|
| 420 |
+
base_model: Sao10K/Fimbulvetr-11B-v2
|
| 421 |
+
---
|
| 422 |
+
|
| 423 |
+
# Uploaded model
|
| 424 |
+
|
| 425 |
+
- **Developed by:** patruff
|
| 426 |
+
- **License:** apache-2.0
|
| 427 |
+
- **Finetuned from model :** Sao10K/Fimbulvetr-11B-v2
|
| 428 |
+
|
| 429 |
+
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
|
| 430 |
+
|
| 431 |
+
[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
|
| 432 |
+
","{""id"": ""patruff/chucklesDPOadapterB"", ""author"": ""patruff"", ""sha"": ""137283275f451c56932913c7ed1b6249c65cc5f1"", ""last_modified"": ""2024-05-29 05:14:11+00:00"", ""created_at"": ""2024-05-29 05:13:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""llama"", ""trl"", ""en"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-29 05:14:11+00:00"", ""cardData"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6656b995ea685cc9d4a86482"", ""modelId"": ""patruff/chucklesDPOadapterB"", ""usedStorage"": 164097339}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=patruff/chucklesDPOadapterB&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpatruff%2FchucklesDPOadapterB%5D(%2Fpatruff%2FchucklesDPOadapterB)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 433 |
+
patruff/probablyBadFineTune11B,"---
|
| 434 |
+
base_model: Sao10K/Fimbulvetr-11B-v2
|
| 435 |
+
language:
|
| 436 |
+
- en
|
| 437 |
+
license: apache-2.0
|
| 438 |
+
tags:
|
| 439 |
+
- text-generation-inference
|
| 440 |
+
- transformers
|
| 441 |
+
- unsloth
|
| 442 |
+
- llama
|
| 443 |
+
- trl
|
| 444 |
+
---
|
| 445 |
+
|
| 446 |
+
# Uploaded model
|
| 447 |
+
|
| 448 |
+
- **Developed by:** patruff
|
| 449 |
+
- **License:** apache-2.0
|
| 450 |
+
- **Finetuned from model :** Sao10K/Fimbulvetr-11B-v2
|
| 451 |
+
|
| 452 |
+
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
|
| 453 |
+
|
| 454 |
+
[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
|
| 455 |
+
","{""id"": ""patruff/probablyBadFineTune11B"", ""author"": ""patruff"", ""sha"": ""fa27795c9cddc19d2929533ecf4be4af0256a470"", ""last_modified"": ""2024-06-26 10:59:59+00:00"", ""created_at"": ""2024-06-26 10:59:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""llama"", ""trl"", ""en"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-26 10:59:59+00:00"", ""cardData"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""667bf48ed0f52ca58a4c5fa3"", 
""modelId"": ""patruff/probablyBadFineTune11B"", ""usedStorage"": 251748704}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=patruff/probablyBadFineTune11B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpatruff%2FprobablyBadFineTune11B%5D(%2Fpatruff%2FprobablyBadFineTune11B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 456 |
+
https://huggingface.co/matchaaaaa/Chaifighter-20B-v2.1,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 457 |
+
ClaudioItaly/Fimbulvetr-Cognitive-11B-V3,"---
|
| 458 |
+
base_model:
|
| 459 |
+
- Sao10K/Fimbulvetr-11B-v2
|
| 460 |
+
library_name: transformers
|
| 461 |
+
tags:
|
| 462 |
+
- mergekit
|
| 463 |
+
- merge
|
| 464 |
+
|
| 465 |
+
---
|
| 466 |
+
# merge
|
| 467 |
+
|
| 468 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 469 |
+
|
| 470 |
+
## Merge Details
|
| 471 |
+
### Merge Method
|
| 472 |
+
|
| 473 |
+
This model was merged using the passthrough merge method.
|
| 474 |
+
|
| 475 |
+
### Models Merged
|
| 476 |
+
1. Enhance Upper Layers (Deep Layers):
|
| 477 |
+
- Focus on deep layers: Higher layers (e.g., from 30 upwards in a model with many layers) tend to capture abstract concepts and complex semantic connections between words or phrases. Increasing their weight or importance (scaling up projections like o_proj or down_proj) can improve the model's reasoning capabilities.
|
| 478 |
+
- Increase model depth: If possible, you can also add new layers or extend the range of existing ones, instead of cutting them. Greater depth allows the model to capture high-level patterns and improve its ability to solve complex cognitive tasks.
|
| 479 |
+
|
| 480 |
+
2. Optimize Intermediate Layers to Improve Semantic Coherence:
|
| 481 |
+
- Enhance intermediate layers: These layers are responsible for linking low-level understanding (syntax and structure) to high-level understanding (abstraction). By enhancing these layers, the model will be able to maintain stronger coherence in cognitive processes. You can do this by increasing projection parameters and reducing penalties on the weights of these layers.
|
| 482 |
+
- Increase long-term memory capacity: If you configure the intermediate layers with a greater capacity to ""memorize"" longer contexts, the model will be better at maintaining the logical thread of more complex texts.
|
| 483 |
+
|
| 484 |
+
3. Increase Capacity of Critical Projections:
|
| 485 |
+
- Positive scaling of projections: Projections like o_proj and down_proj handle the transformation and transmission of information between layers. Increasing their weight, rather than reducing or zeroing it, amplifies the model's ability to process and propagate relevant information. This can improve the model's ability to make deep inferences, resolve linguistic ambiguities, and maintain logical coherence in longer texts.
|
| 486 |
+
|
| 487 |
+
4. Custom Configuration for Complex Tasks:
|
| 488 |
+
- Focus on specific tasks: If you want to enhance the model for advanced cognitive tasks, you can customize the configuration depending on the type of task. For example, for long-term coherent and fluent text generation, you can emphasize the final layers and projection parameters, while for classification tasks, you can give more weight to the intermediate layers.
|
| 489 |
+
- Improve abstraction and contextual understanding: Tasks such as logical reasoning or long text comprehension require greater abstraction capacity. By enhancing the model's final layers, where more abstract concepts develop, you can make the model more performant in these areas.
|
| 490 |
+
|
| 491 |
+
5. Add or Improve Attention Functions:
|
| 492 |
+
- Multi-Head Attention: Attention is a key mechanism for abstraction and inference. By increasing the number of attention heads or enhancing their contribution in the advanced stages of the model, you can improve the model's ability to focus on complex semantic relationships in the text.
|
| 493 |
+
- Strengthen attention capacity in upper layers: By giving greater importance to attention in deeper layers, the model will be able to handle long contexts and maintain long-term coherence, a critical factor in tasks requiring deep cognitive understanding.
|
| 494 |
+
|
| 495 |
+
6. Maintain Computational Capacity:
|
| 496 |
+
- Don't reduce intermediate and upper layers: Unlike a reduction process where you eliminate or reduce the importance of some layers, here it's crucial to maintain (or enhance) all layers from a computational perspective. Each layer has a role in creating more refined representations, and by maintaining the integrity of the entire network, the model will be able to handle complex reasoning tasks.
|
| 497 |
+
|
| 498 |
+
7. Effect of the Process:
|
| 499 |
+
- Increased accuracy and understanding: A model optimized to increase cognitive abilities will be more precise in complex tasks, such as generating fluent text, resolving semantic ambiguities, reasoning on long texts, and understanding context.
|
| 500 |
+
- Greater computational load: However, with this configuration, the computational load and inference times will also increase, as you are increasing the complexity of the model's operations. This is the trade-off: more cognitive capabilities mean more resource usage.
|
| 501 |
+
The following models were included in the merge:
|
| 502 |
+
* [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2)
|
| 503 |
+
|
| 504 |
+
### Configuration
|
| 505 |
+
|
| 506 |
+
The following YAML configuration was used to produce this model:
|
| 507 |
+
|
| 508 |
+
```yaml
|
| 509 |
+
slices:
|
| 510 |
+
- sources:
|
| 511 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 512 |
+
layer_range: [0, 4]
|
| 513 |
+
- sources:
|
| 514 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 515 |
+
layer_range: [4, 8]
|
| 516 |
+
parameters:
|
| 517 |
+
scale:
|
| 518 |
+
- filter: o_proj
|
| 519 |
+
value: 1.5
|
| 520 |
+
- filter: down_proj
|
| 521 |
+
value: 1.5
|
| 522 |
+
- sources:
|
| 523 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 524 |
+
layer_range: [8, 12]
|
| 525 |
+
parameters:
|
| 526 |
+
scale:
|
| 527 |
+
- filter: o_proj
|
| 528 |
+
value: 1.5
|
| 529 |
+
- filter: down_proj
|
| 530 |
+
value: 1.5
|
| 531 |
+
- sources:
|
| 532 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 533 |
+
layer_range: [12, 16]
|
| 534 |
+
parameters:
|
| 535 |
+
scale:
|
| 536 |
+
- filter: o_proj
|
| 537 |
+
value: 2.0
|
| 538 |
+
- filter: down_proj
|
| 539 |
+
value: 2.0
|
| 540 |
+
- sources:
|
| 541 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 542 |
+
layer_range: [16, 20]
|
| 543 |
+
parameters:
|
| 544 |
+
scale:
|
| 545 |
+
- filter: o_proj
|
| 546 |
+
value: 2.0
|
| 547 |
+
- filter: down_proj
|
| 548 |
+
value: 2.0
|
| 549 |
+
- sources:
|
| 550 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 551 |
+
layer_range: [20, 24]
|
| 552 |
+
parameters:
|
| 553 |
+
scale:
|
| 554 |
+
- filter: o_proj
|
| 555 |
+
value: 2.5
|
| 556 |
+
- filter: down_proj
|
| 557 |
+
value: 2.5
|
| 558 |
+
- sources:
|
| 559 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 560 |
+
layer_range: [24, 28]
|
| 561 |
+
parameters:
|
| 562 |
+
scale:
|
| 563 |
+
- filter: o_proj
|
| 564 |
+
value: 2.5
|
| 565 |
+
- filter: down_proj
|
| 566 |
+
value: 2.5
|
| 567 |
+
- sources:
|
| 568 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 569 |
+
layer_range: [28, 32]
|
| 570 |
+
parameters:
|
| 571 |
+
scale:
|
| 572 |
+
- filter: o_proj
|
| 573 |
+
value: 3.0
|
| 574 |
+
- filter: down_proj
|
| 575 |
+
value: 3.0
|
| 576 |
+
- sources:
|
| 577 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 578 |
+
layer_range: [32, 36]
|
| 579 |
+
parameters:
|
| 580 |
+
scale:
|
| 581 |
+
- filter: o_proj
|
| 582 |
+
value: 3.0
|
| 583 |
+
- filter: down_proj
|
| 584 |
+
value: 3.0
|
| 585 |
+
- sources:
|
| 586 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 587 |
+
layer_range: [36, 40]
|
| 588 |
+
parameters:
|
| 589 |
+
scale:
|
| 590 |
+
- filter: o_proj
|
| 591 |
+
value: 3.5
|
| 592 |
+
- filter: down_proj
|
| 593 |
+
value: 3.5
|
| 594 |
+
- sources:
|
| 595 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 596 |
+
layer_range: [40, 44]
|
| 597 |
+
parameters:
|
| 598 |
+
scale:
|
| 599 |
+
- filter: o_proj
|
| 600 |
+
value: 3.5
|
| 601 |
+
- filter: down_proj
|
| 602 |
+
value: 3.5
|
| 603 |
+
- sources:
|
| 604 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 605 |
+
layer_range: [44, 47]
|
| 606 |
+
parameters:
|
| 607 |
+
scale:
|
| 608 |
+
- filter: o_proj
|
| 609 |
+
value: 4.0
|
| 610 |
+
- filter: down_proj
|
| 611 |
+
value: 4.0
|
| 612 |
+
merge_method: passthrough
|
| 613 |
+
dtype: bfloat16
|
| 614 |
+
```
|
| 615 |
+
","{""id"": ""ClaudioItaly/Fimbulvetr-Cognitive-11B-V3"", ""author"": ""ClaudioItaly"", ""sha"": ""90551637522656a477620378b6b80ec6dd49abd3"", ""last_modified"": ""2024-09-13 19:50:07+00:00"", ""created_at"": ""2024-09-13 19:30:12+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 10513412096}, ""total"": 10513412096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-13 19:50:07+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e492c45c06b7719c1788da"", ""modelId"": ""ClaudioItaly/Fimbulvetr-Cognitive-11B-V3"", ""usedStorage"": 21027367227}",1,,0,,0,"https://huggingface.co/ClaudioItaly/Fimbulvetr-Cognitive-11B-V3-Q5_K_M-GGUF, https://huggingface.co/mradermacher/Fimbulvetr-Cognitive-11B-V3-GGUF, 
https://huggingface.co/mradermacher/Fimbulvetr-Cognitive-11B-V3-i1-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=ClaudioItaly/Fimbulvetr-Cognitive-11B-V3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BClaudioItaly%2FFimbulvetr-Cognitive-11B-V3%5D(%2FClaudioItaly%2FFimbulvetr-Cognitive-11B-V3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 616 |
+
ClaudioItaly/Fimbulvetr-40,"---
|
| 617 |
+
base_model:
|
| 618 |
+
- Sao10K/Fimbulvetr-11B-v2
|
| 619 |
+
library_name: transformers
|
| 620 |
+
tags:
|
| 621 |
+
- mergekit
|
| 622 |
+
- merge
|
| 623 |
+
|
| 624 |
+
---
|
| 625 |
+
Changing the value of kv_count from 34 to 40 indicates an increase in the number of key-value pairs in the model. These key-value pairs are mainly used to represent attention information within neural networks, particularly in Transformer-type models such as LLaMA.
|
| 626 |
+
|
| 627 |
+
# merge
|
| 628 |
+
|
| 629 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 630 |
+
|
| 631 |
+
## Merge Details
|
| 632 |
+
### Merge Method
|
| 633 |
+
|
| 634 |
+
This model was merged using the passthrough merge method using [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2) as a base.
|
| 635 |
+
|
| 636 |
+
### Models Merged
|
| 637 |
+
|
| 638 |
+
The following models were included in the merge:
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
### Configuration
|
| 642 |
+
|
| 643 |
+
The following YAML configuration was used to produce this model:
|
| 644 |
+
|
| 645 |
+
```yaml
|
| 646 |
+
base_model: Sao10K/Fimbulvetr-11B-v2
|
| 647 |
+
merge_method: passthrough
|
| 648 |
+
dtype: float16
|
| 649 |
+
parameters:
|
| 650 |
+
normalize: true
|
| 651 |
+
|
| 652 |
+
slices:
|
| 653 |
+
- sources:
|
| 654 |
+
- model: Sao10K/Fimbulvetr-11B-v2
|
| 655 |
+
layer_range: [0, 48] # Assumi che il modello abbia 48 layer
|
| 656 |
+
densify:
|
| 657 |
+
- linear
|
| 658 |
+
- ""rope:alpha=8192/4096"" # Estende il contesto a 8192
|
| 659 |
+
|
| 660 |
+
tokens:
|
| 661 |
+
- source: Sao10K/Fimbulvetr-11B-v2
|
| 662 |
+
mode: stretch
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
```
|
| 666 |
+
","{""id"": ""ClaudioItaly/Fimbulvetr-40"", ""author"": ""ClaudioItaly"", ""sha"": ""2653389c788fd3102401db253a3fa5dc9f5314fd"", ""last_modified"": ""2024-09-17 09:38:07+00:00"", ""created_at"": ""2024-09-17 09:15:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 10731524096}, ""total"": 10731524096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-17 09:38:07+00:00"", ""cardData"": ""base_model:\n- Sao10K/Fimbulvetr-11B-v2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e948cdb3982349fa32a462"", ""modelId"": ""ClaudioItaly/Fimbulvetr-40"", ""usedStorage"": 21463591851}",1,,0,,0,"https://huggingface.co/mradermacher/Fimbulvetr-40-GGUF, https://huggingface.co/mradermacher/Fimbulvetr-40-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=ClaudioItaly/Fimbulvetr-40&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BClaudioItaly%2FFimbulvetr-40%5D(%2FClaudioItaly%2FFimbulvetr-40)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 667 |
+
mlx-community/Fimbulvetr-11B-v2,"---
|
| 668 |
+
base_model: Sao10K/Fimbulvetr-11B-v2
|
| 669 |
+
language:
|
| 670 |
+
- en
|
| 671 |
+
license: cc-by-nc-4.0
|
| 672 |
+
tags:
|
| 673 |
+
- mlx
|
| 674 |
+
---
|
| 675 |
+
|
| 676 |
+
# mlx-community/Fimbulvetr-11B-v2
|
| 677 |
+
|
| 678 |
+
The Model [mlx-community/Fimbulvetr-11B-v2](https://huggingface.co/mlx-community/Fimbulvetr-11B-v2) was converted to MLX format from [Sao10K/Fimbulvetr-11B-v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2) using mlx-lm version **0.19.0**.
|
| 679 |
+
|
| 680 |
+
## Use with mlx
|
| 681 |
+
|
| 682 |
+
```bash
|
| 683 |
+
pip install mlx-lm
|
| 684 |
+
```
|
| 685 |
+
|
| 686 |
+
```python
|
| 687 |
+
from mlx_lm import load, generate
|
| 688 |
+
|
| 689 |
+
model, tokenizer = load(""mlx-community/Fimbulvetr-11B-v2"")
|
| 690 |
+
|
| 691 |
+
prompt=""hello""
|
| 692 |
+
|
| 693 |
+
if hasattr(tokenizer, ""apply_chat_template"") and tokenizer.chat_template is not None:
|
| 694 |
+
messages = [{""role"": ""user"", ""content"": prompt}]
|
| 695 |
+
prompt = tokenizer.apply_chat_template(
|
| 696 |
+
messages, tokenize=False, add_generation_prompt=True
|
| 697 |
+
)
|
| 698 |
+
|
| 699 |
+
response = generate(model, tokenizer, prompt=prompt, verbose=True)
|
| 700 |
+
```
|
| 701 |
+
","{""id"": ""mlx-community/Fimbulvetr-11B-v2"", ""author"": ""mlx-community"", ""sha"": ""e5a90777372f3d9068af2465ecba12aafbc65a40"", ""last_modified"": ""2024-10-20 19:27:23+00:00"", ""created_at"": ""2024-10-20 19:17:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""mlx"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""mlx"", ""safetensors"", ""llama"", ""en"", ""base_model:Sao10K/Fimbulvetr-11B-v2"", ""base_model:finetune:Sao10K/Fimbulvetr-11B-v2"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- mlx"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 10731524096}, ""total"": 10731524096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-20 19:27:23+00:00"", ""cardData"": ""base_model: Sao10K/Fimbulvetr-11B-v2\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- mlx"", ""transformersInfo"": null, ""_id"": ""67155743acae3e126df217b6"", ""modelId"": ""mlx-community/Fimbulvetr-11B-v2"", ""usedStorage"": 21463591815}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mlx-community/Fimbulvetr-11B-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlx-community%2FFimbulvetr-11B-v2%5D(%2Fmlx-community%2FFimbulvetr-11B-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 702 |
+
https://huggingface.co/TheHierophant/Fimbulvetr-11B-Attention-V0.1-test,N/A,N/A,1,,0,,0,,0,,0,,0
|
Florence-2-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
microsoft/Florence-2-base,"---
|
| 3 |
+
license: mit
|
| 4 |
+
license_link: https://huggingface.co/microsoft/Florence-2-base/resolve/main/LICENSE
|
| 5 |
+
pipeline_tag: image-text-to-text
|
| 6 |
+
tags:
|
| 7 |
+
- vision
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Florence-2: Advancing a Unified Representation for a Variety of Vision Tasks
|
| 11 |
+
|
| 12 |
+
## Model Summary
|
| 13 |
+
|
| 14 |
+
This Hub repository contains a HuggingFace's `transformers` implementation of Florence-2 model from Microsoft.
|
| 15 |
+
|
| 16 |
+
Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks. Florence-2 can interpret simple text prompts to perform tasks like captioning, object detection, and segmentation. It leverages our FLD-5B dataset, containing 5.4 billion annotations across 126 million images, to master multi-task learning. The model's sequence-to-sequence architecture enables it to excel in both zero-shot and fine-tuned settings, proving to be a competitive vision foundation model.
|
| 17 |
+
|
| 18 |
+
Resources and Technical Documentation:
|
| 19 |
+
+ [Florence-2 technical report](https://arxiv.org/abs/2311.06242).
|
| 20 |
+
+ [Jupyter Notebook for inference and visualization of Florence-2-large model](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)
|
| 21 |
+
|
| 22 |
+
| Model | Model size | Model Description |
|
| 23 |
+
| ------- | ------------- | ------------- |
|
| 24 |
+
| Florence-2-base[[HF]](https://huggingface.co/microsoft/Florence-2-base) | 0.23B | Pretrained model with FLD-5B
|
| 25 |
+
| Florence-2-large[[HF]](https://huggingface.co/microsoft/Florence-2-large) | 0.77B | Pretrained model with FLD-5B
|
| 26 |
+
| Florence-2-base-ft[[HF]](https://huggingface.co/microsoft/Florence-2-base-ft) | 0.23B | Finetuned model on a colletion of downstream tasks
|
| 27 |
+
| Florence-2-large-ft[[HF]](https://huggingface.co/microsoft/Florence-2-large-ft) | 0.77B | Finetuned model on a colletion of downstream tasks
|
| 28 |
+
|
| 29 |
+
## How to Get Started with the Model
|
| 30 |
+
|
| 31 |
+
Use the code below to get started with the model. All models are trained with float16.
|
| 32 |
+
|
| 33 |
+
```python
|
| 34 |
+
import requests
|
| 35 |
+
|
| 36 |
+
from PIL import Image
|
| 37 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 38 |
+
|
| 39 |
+
device = ""cuda:0"" if torch.cuda.is_available() else ""cpu""
|
| 40 |
+
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 41 |
+
|
| 42 |
+
model = AutoModelForCausalLM.from_pretrained(""microsoft/Florence-2-base"", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
|
| 43 |
+
processor = AutoProcessor.from_pretrained(""microsoft/Florence-2-base"", trust_remote_code=True)
|
| 44 |
+
|
| 45 |
+
prompt = ""<OD>""
|
| 46 |
+
|
| 47 |
+
url = ""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true""
|
| 48 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
| 49 |
+
|
| 50 |
+
inputs = processor(text=prompt, images=image, return_tensors=""pt"").to(device, torch_dtype)
|
| 51 |
+
|
| 52 |
+
generated_ids = model.generate(
|
| 53 |
+
input_ids=inputs[""input_ids""],
|
| 54 |
+
pixel_values=inputs[""pixel_values""],
|
| 55 |
+
max_new_tokens=1024,
|
| 56 |
+
do_sample=False,
|
| 57 |
+
num_beams=3,
|
| 58 |
+
)
|
| 59 |
+
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
| 60 |
+
|
| 61 |
+
parsed_answer = processor.post_process_generation(generated_text, task=""<OD>"", image_size=(image.width, image.height))
|
| 62 |
+
|
| 63 |
+
print(parsed_answer)
|
| 64 |
+
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
## Tasks
|
| 69 |
+
|
| 70 |
+
This model is capable of performing different tasks through changing the prompts.
|
| 71 |
+
|
| 72 |
+
First, let's define a function to run a prompt.
|
| 73 |
+
|
| 74 |
+
<details>
|
| 75 |
+
<summary> Click to expand </summary>
|
| 76 |
+
|
| 77 |
+
```python
|
| 78 |
+
import requests
|
| 79 |
+
|
| 80 |
+
from PIL import Image
|
| 81 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 82 |
+
|
| 83 |
+
device = ""cuda:0"" if torch.cuda.is_available() else ""cpu""
|
| 84 |
+
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 85 |
+
|
| 86 |
+
model = AutoModelForCausalLM.from_pretrained(""microsoft/Florence-2-base"", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
|
| 87 |
+
processor = AutoProcessor.from_pretrained(""microsoft/Florence-2-base"", trust_remote_code=True)
|
| 88 |
+
|
| 89 |
+
url = ""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true""
|
| 90 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
| 91 |
+
|
| 92 |
+
def run_example(task_prompt, text_input=None):
|
| 93 |
+
if text_input is None:
|
| 94 |
+
prompt = task_prompt
|
| 95 |
+
else:
|
| 96 |
+
prompt = task_prompt + text_input
|
| 97 |
+
inputs = processor(text=prompt, images=image, return_tensors=""pt"").to(device, torch_dtype)
|
| 98 |
+
generated_ids = model.generate(
|
| 99 |
+
input_ids=inputs[""input_ids""],
|
| 100 |
+
pixel_values=inputs[""pixel_values""],
|
| 101 |
+
max_new_tokens=1024,
|
| 102 |
+
num_beams=3
|
| 103 |
+
)
|
| 104 |
+
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
| 105 |
+
|
| 106 |
+
parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
|
| 107 |
+
|
| 108 |
+
print(parsed_answer)
|
| 109 |
+
```
|
| 110 |
+
</details>
|
| 111 |
+
|
| 112 |
+
Here are the tasks `Florence-2` could perform:
|
| 113 |
+
|
| 114 |
+
<details>
|
| 115 |
+
<summary> Click to expand </summary>
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
### Caption
|
| 120 |
+
```python
|
| 121 |
+
prompt = ""<CAPTION>""
|
| 122 |
+
run_example(prompt)
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
### Detailed Caption
|
| 126 |
+
```python
|
| 127 |
+
prompt = ""<DETAILED_CAPTION>""
|
| 128 |
+
run_example(prompt)
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
### More Detailed Caption
|
| 132 |
+
```python
|
| 133 |
+
prompt = ""<MORE_DETAILED_CAPTION>""
|
| 134 |
+
run_example(prompt)
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
### Caption to Phrase Grounding
|
| 138 |
+
caption to phrase grounding task requires additional text input, i.e. caption.
|
| 139 |
+
|
| 140 |
+
Caption to phrase grounding results format:
|
| 141 |
+
{'\<CAPTION_TO_PHRASE_GROUNDING>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['', '', ...]}}
|
| 142 |
+
```python
|
| 143 |
+
task_prompt = ""<CAPTION_TO_PHRASE_GROUNDING>""
|
| 144 |
+
results = run_example(task_prompt, text_input=""A green car parked in front of a yellow building."")
|
| 145 |
+
```
|
| 146 |
+
|
| 147 |
+
### Object Detection
|
| 148 |
+
|
| 149 |
+
OD results format:
|
| 150 |
+
{'\<OD>': {'bboxes': [[x1, y1, x2, y2], ...],
|
| 151 |
+
'labels': ['label1', 'label2', ...]} }
|
| 152 |
+
|
| 153 |
+
```python
|
| 154 |
+
prompt = ""<OD>""
|
| 155 |
+
run_example(prompt)
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
### Dense Region Caption
|
| 159 |
+
Dense region caption results format:
|
| 160 |
+
{'\<DENSE_REGION_CAPTION>' : {'bboxes': [[x1, y1, x2, y2], ...],
|
| 161 |
+
'labels': ['label1', 'label2', ...]} }
|
| 162 |
+
```python
|
| 163 |
+
prompt = ""<DENSE_REGION_CAPTION>""
|
| 164 |
+
run_example(prompt)
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
### Region proposal
|
| 168 |
+
Dense region caption results format:
|
| 169 |
+
{'\<REGION_PROPOSAL>': {'bboxes': [[x1, y1, x2, y2], ...],
|
| 170 |
+
'labels': ['', '', ...]}}
|
| 171 |
+
```python
|
| 172 |
+
prompt = ""<REGION_PROPOSAL>""
|
| 173 |
+
run_example(prompt)
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
### OCR
|
| 177 |
+
|
| 178 |
+
```python
|
| 179 |
+
prompt = ""<OCR>""
|
| 180 |
+
run_example(prompt)
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
### OCR with Region
|
| 184 |
+
OCR with region output format:
|
| 185 |
+
{'\<OCR_WITH_REGION>': {'quad_boxes': [[x1, y1, x2, y2, x3, y3, x4, y4], ...], 'labels': ['text1', ...]}}
|
| 186 |
+
```python
|
| 187 |
+
prompt = ""<OCR_WITH_REGION>""
|
| 188 |
+
run_example(prompt)
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
for More detailed examples, please refer to [notebook](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)
|
| 192 |
+
</details>
|
| 193 |
+
|
| 194 |
+
# Benchmarks
|
| 195 |
+
|
| 196 |
+
## Florence-2 Zero-shot performance
|
| 197 |
+
|
| 198 |
+
The following table presents the zero-shot performance of generalist vision foundation models on image captioning and object detection evaluation tasks. These models have not been exposed to the training data of the evaluation tasks during their training phase.
|
| 199 |
+
|
| 200 |
+
| Method | #params | COCO Cap. test CIDEr | NoCaps val CIDEr | TextCaps val CIDEr | COCO Det. val2017 mAP |
|
| 201 |
+
|--------|---------|----------------------|------------------|--------------------|-----------------------|
|
| 202 |
+
| Flamingo | 80B | 84.3 | - | - | - |
|
| 203 |
+
| Florence-2-base| 0.23B | 133.0 | 118.7 | 70.1 | 34.7 |
|
| 204 |
+
| Florence-2-large| 0.77B | 135.6 | 120.8 | 72.8 | 37.5 |
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
The following table continues the comparison with performance on other vision-language evaluation tasks.
|
| 208 |
+
|
| 209 |
+
| Method | Flickr30k test R@1 | Refcoco val Accuracy | Refcoco test-A Accuracy | Refcoco test-B Accuracy | Refcoco+ val Accuracy | Refcoco+ test-A Accuracy | Refcoco+ test-B Accuracy | Refcocog val Accuracy | Refcocog test Accuracy | Refcoco RES val mIoU |
|
| 210 |
+
|--------|----------------------|----------------------|-------------------------|-------------------------|-----------------------|--------------------------|--------------------------|-----------------------|------------------------|----------------------|
|
| 211 |
+
| Kosmos-2 | 78.7 | 52.3 | 57.4 | 47.3 | 45.5 | 50.7 | 42.2 | 60.6 | 61.7 | - |
|
| 212 |
+
| Florence-2-base | 83.6 | 53.9 | 58.4 | 49.7 | 51.5 | 56.4 | 47.9 | 66.3 | 65.1 | 34.6 |
|
| 213 |
+
| Florence-2-large | 84.4 | 56.3 | 61.6 | 51.4 | 53.6 | 57.9 | 49.9 | 68.0 | 67.0 | 35.8 |
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
## Florence-2 finetuned performance
|
| 218 |
+
|
| 219 |
+
We finetune Florence-2 models with a collection of downstream tasks, resulting two generalist models *Florence-2-base-ft* and *Florence-2-large-ft* that can conduct a wide range of downstream tasks.
|
| 220 |
+
|
| 221 |
+
The table below compares the performance of specialist and generalist models on various captioning and Visual Question Answering (VQA) tasks. Specialist models are fine-tuned specifically for each task, whereas generalist models are fine-tuned in a task-agnostic manner across all tasks. The symbol ""▲"" indicates the usage of external OCR as input.
|
| 222 |
+
|
| 223 |
+
| Method | # Params | COCO Caption Karpathy test CIDEr | NoCaps val CIDEr | TextCaps val CIDEr | VQAv2 test-dev Acc | TextVQA test-dev Acc | VizWiz VQA test-dev Acc |
|
| 224 |
+
|----------------|----------|-----------------------------------|------------------|--------------------|--------------------|----------------------|-------------------------|
|
| 225 |
+
| **Specialist Models** | | | | | | | |
|
| 226 |
+
| CoCa | 2.1B | 143.6 | 122.4 | - | 82.3 | - | - |
|
| 227 |
+
| BLIP-2 | 7.8B | 144.5 | 121.6 | - | 82.2 | - | - |
|
| 228 |
+
| GIT2 | 5.1B | 145.0 | 126.9 | 148.6 | 81.7 | 67.3 | 71.0 |
|
| 229 |
+
| Flamingo | 80B | 138.1 | - | - | 82.0 | 54.1 | 65.7 |
|
| 230 |
+
| PaLI | 17B | 149.1 | 127.0 | 160.0▲ | 84.3 | 58.8 / 73.1▲ | 71.6 / 74.4▲ |
|
| 231 |
+
| PaLI-X | 55B | 149.2 | 126.3 | 147.0 / 163.7▲ | 86.0 | 71.4 / 80.8▲ | 70.9 / 74.6▲ |
|
| 232 |
+
| **Generalist Models** | | | | | | | |
|
| 233 |
+
| Unified-IO | 2.9B | - | 100.0 | - | 77.9 | - | 57.4 |
|
| 234 |
+
| Florence-2-base-ft | 0.23B | 140.0 | 116.7 | 143.9 | 79.7 | 63.6 | 63.6 |
|
| 235 |
+
| Florence-2-large-ft | 0.77B | 143.3 | 124.9 | 151.1 | 81.7 | 73.5 | 72.6 |
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
| Method | # Params | COCO Det. val2017 mAP | Flickr30k test R@1 | RefCOCO val Accuracy | RefCOCO test-A Accuracy | RefCOCO test-B Accuracy | RefCOCO+ val Accuracy | RefCOCO+ test-A Accuracy | RefCOCO+ test-B Accuracy | RefCOCOg val Accuracy | RefCOCOg test Accuracy | RefCOCO RES val mIoU |
|
| 239 |
+
|----------------------|----------|-----------------------|--------------------|----------------------|-------------------------|-------------------------|------------------------|---------------------------|---------------------------|------------------------|-----------------------|------------------------|
|
| 240 |
+
| **Specialist Models** | | | | | | | | | | | | |
|
| 241 |
+
| SeqTR | - | - | - | 83.7 | 86.5 | 81.2 | 71.5 | 76.3 | 64.9 | 74.9 | 74.2 | - |
|
| 242 |
+
| PolyFormer | - | - | - | 90.4 | 92.9 | 87.2 | 85.0 | 89.8 | 78.0 | 85.8 | 85.9 | 76.9 |
|
| 243 |
+
| UNINEXT | 0.74B | 60.6 | - | 92.6 | 94.3 | 91.5 | 85.2 | 89.6 | 79.8 | 88.7 | 89.4 | - |
|
| 244 |
+
| Ferret | 13B | - | - | 89.5 | 92.4 | 84.4 | 82.8 | 88.1 | 75.2 | 85.8 | 86.3 | - |
|
| 245 |
+
| **Generalist Models** | | | | | | | | | | | | |
|
| 246 |
+
| UniTAB | - | - | - | 88.6 | 91.1 | 83.8 | 81.0 | 85.4 | 71.6 | 84.6 | 84.7 | - |
|
| 247 |
+
| Florence-2-base-ft | 0.23B | 41.4 | 84.0 | 92.6 | 94.8 | 91.5 | 86.8 | 91.7 | 82.2 | 89.8 | 82.2 | 78.0 |
|
| 248 |
+
| Florence-2-large-ft| 0.77B | 43.4 | 85.2 | 93.4 | 95.3 | 92.0 | 88.3 | 92.9 | 83.6 | 91.2 | 91.7 | 80.5 |
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
## BibTex and citation info
|
| 252 |
+
|
| 253 |
+
```
|
| 254 |
+
@article{xiao2023florence,
|
| 255 |
+
title={Florence-2: Advancing a unified representation for a variety of vision tasks},
|
| 256 |
+
author={Xiao, Bin and Wu, Haiping and Xu, Weijian and Dai, Xiyang and Hu, Houdong and Lu, Yumao and Zeng, Michael and Liu, Ce and Yuan, Lu},
|
| 257 |
+
journal={arXiv preprint arXiv:2311.06242},
|
| 258 |
+
year={2023}
|
| 259 |
+
}
|
| 260 |
+
```","{""id"": ""microsoft/Florence-2-base"", ""author"": ""microsoft"", ""sha"": ""ceaf371f01ef66192264811b390bccad475a4f02"", ""last_modified"": ""2024-11-04 17:59:39+00:00"", ""created_at"": ""2024-06-15 00:57:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 428374, ""downloads_all_time"": null, ""likes"": 264, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""florence2"", ""text-generation"", ""vision"", ""image-text-to-text"", ""custom_code"", ""arxiv:2311.06242"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: mit\nlicense_link: https://huggingface.co/microsoft/Florence-2-base/resolve/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- vision"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CODE_OF_CONDUCT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SECURITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SUPPORT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""gokaygokay/Florence-2"", ""SkalskiP/florence-sam"", ""microsoft/OmniParser-v2"", ""ovi054/image-to-prompt"", ""microsoft/OmniParser"", ""SkalskiP/better-florence-2"", ""gokaygokay/PonyRealism"", ""gokaygokay/KolorsPlusPlus"", ""gokaygokay/FLUX.1-dev-with-Captioner"", ""gokaygokay/AuraFlow-with-Captioner"", ""gokaygokay/SD3.5-with-Captioner"", ""gokaygokay/NoobAI-Animagine-T-ponynai3"", ""Nymbo/FLUX-Prompt-Generator"", ""SkalskiP/florence-sam-masking"", ""ginipick/FLUX-Prompt-Generator"", ""prithivMLmods/Image-Captioning"", ""DamarJati/Remove-watermark"", ""microsoft/Magma-UI"", ""Akshayram1/image_to_promt_info"", ""phyloforfun/VoucherVision"", ""Nymbo/image-to-prompt"", ""ginigen/OmniParser-v2-pro"", ""Jonny001/Image-to-Text"", ""wjfhugging/image-to-prompt"", ""drlon/magma-ui-agent"", ""cibey/cibey-image-to-prompt"", ""jotase/florence-sam-masking"", ""tsengiii/Image_to_prompt"", ""ovi054/text-guided-mask-for-inpainting"", ""lee-ite/Florence-2-CPU"", ""jiuface/florence-sam-masking"", ""ighoshsubho/flux-sam-florence"", ""mrbeliever/img-to-prompt"", ""not-lain/OmniParser-v2"", ""GianJSX/Florence-2"", ""ThreadAbort/Florence-2"", ""UltraMarkoRJ/ImageToPrompt"", ""tsi-org/FLUX-Prompt-Generator"", ""aliceblue11/FLUX-Prompt-Generator123"", 
""tb2l/florence-sam-masking"", ""Gordonkl/image_to_prompt"", ""TotoB12/OmniParser"", ""EX4L/PonyXL"", ""silveroxides/RNS-NoobAI-Hybrid"", ""zarroug/MC-image-to-prompt"", ""tommytracx/EnergyOmniParser"", ""Ashoka74/ProductPlacement"", ""ullashAi/image-to-prompt"", ""saravatpt/smartbizai-imagecaptioning-api"", ""Steven10429/apply_lora_and_quantize"", ""SOMIA-ALSHIBAH-25/image-to-text"", ""nofl/OmniParser-v2"", ""pratik188/Multi_Modal_AI_Surveillance_Chatbot"", ""victorgg/Florence-2-bot"", ""Mahmoudmody777/image-to-prompt"", ""Adityabhaskar/image-to-prompt"", ""QuinACR/deepvesionQuinV21"", ""54xunqi/Florence-2"", ""QuinACR/DeepVisonCoreTALI1"", ""cocktailpeanut/Florence-2"", ""lingkoai/Florence"", ""aolko/describe-test"", ""mroca/Florence-2"", ""alexbuz/ocr"", ""alexbuz/florence-2-ocr"", ""EX4L/PonyRealism-ZeroGPUs"", ""cocktailpeanut/florence-sam"", ""PiusShaw/detect_object"", ""datmar/florence-sam"", ""jiachenjiang/image-restoration"", ""Justforailolomg/PonyRealismOPEN"", ""Justforailolomg/PonyRealismOPEN_TURBO"", ""mobenta/cp_flux"", ""Justforailolomg/PonyRealismNEW"", ""Justforailolomg/PonyRealism_neww"", ""Ssdfiuheqwiufhuieir/FLUX-Prompt-Generator"", ""Ssdfiuheqwiufhuieir/FLUX-Prompt-Generatoruy"", ""NRbones/florence-sam-masking"", ""pabitramahato/FLUX-Prompt-Generator"", ""QQQ-XXX/florence-sam"", ""Erfan2001/Florence-2-Erfan"", ""Deddy/FLUX-Prompt-Maker"", ""aliceblue11/FLUX-Prompt-Generator111"", ""mrvero/Florence-2"", ""ahmedghani/video-object-removal"", ""waloneai/Walone-Prompt-Generator"", ""gvij/inpainting-segment"", ""ciditel/better-florence-2"", ""Justforailolomg/PonyRealismNew2"", ""Justforailolomg/PonyRealismLive"", ""Justforailolomg/PonyRealism123"", ""MoAusaf/florence-sam"", ""adminuhstraydur/Florence-DarkIdol"", ""yuvabe-ai/Ring_Size_Scalev2"", ""BBo09/image_text"", ""yuuciciu/image_text"", ""Peiiiiiiiiru/IMAGE_HE"", ""wint543/image_text"", ""ziphai/Image_TP"", ""wuki8888/image""], ""safetensors"": null, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2024-11-04 17:59:39+00:00"", ""cardData"": ""license: mit\nlicense_link: https://huggingface.co/microsoft/Florence-2-base/resolve/main/LICENSE\npipeline_tag: image-text-to-text\ntags:\n- vision"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""666ce6f4067382b3e98317c6"", ""modelId"": ""microsoft/Florence-2-base"", ""usedStorage"": 1932866513}",0,"https://huggingface.co/Samsung/TinyClick, https://huggingface.co/AskUI/PTA-1, https://huggingface.co/PJMixers-Images/Florence-2-base-danbooru2022-316k, https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.4, https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.5, https://huggingface.co/maxiw/Florence-2-ScreenQA-base, https://huggingface.co/gokaygokay/Florence-2-Flux, https://huggingface.co/sahilnishad/Florence-2-FT-DocVQA, https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.1, https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.2, https://huggingface.co/alakxender/florence-base-dv01-01, https://huggingface.co/alakxender/florence-base-dv01-01-dv-e10",12,,0,https://huggingface.co/onnx-community/Florence-2-base,1,,0,"SkalskiP/better-florence-2, SkalskiP/florence-sam, gokaygokay/AuraFlow-with-Captioner, gokaygokay/FLUX.1-dev-with-Captioner, gokaygokay/Florence-2, gokaygokay/KolorsPlusPlus, gokaygokay/NoobAI-Animagine-T-ponynai3, gokaygokay/PonyRealism, gokaygokay/SD3.5-with-Captioner, 
huggingface/InferenceSupport/discussions/new?title=microsoft/Florence-2-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmicrosoft%2FFlorence-2-base%5D(%2Fmicrosoft%2FFlorence-2-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, microsoft/OmniParser, microsoft/OmniParser-v2, ovi054/image-to-prompt",13
|
| 261 |
+
Samsung/TinyClick,"---
|
| 262 |
+
license: mit
|
| 263 |
+
base_model: microsoft/Florence-2-base
|
| 264 |
+
---
|
| 265 |
+
|
| 266 |
+
<a id=""readme-top""></a>
|
| 267 |
+
|
| 268 |
+
[![arXiv][paper-shield]][paper-url]
|
| 269 |
+
[![MIT License][license-shield]][license-url]
|
| 270 |
+
|
| 271 |
+
<!-- PROJECT LOGO -->
|
| 272 |
+
<br />
|
| 273 |
+
<div align=""center"">
|
| 274 |
+
<!-- <a href=""https://github.com/othneildrew/Best-README-Template"">
|
| 275 |
+
<img src=""images/logo.png"" alt=""Logo"" width=""80"" height=""80"">
|
| 276 |
+
</a> -->
|
| 277 |
+
<h3 align=""center"">TinyClick: Single-Turn Agent for Empowering GUI Automation</h3>
|
| 278 |
+
<p align=""center"">
|
| 279 |
+
The code for running the model from paper: TinyClick: Single-Turn Agent for Empowering GUI Automation
|
| 280 |
+
</p>
|
| 281 |
+
</div>
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
<!-- ABOUT THE PROJECT -->
|
| 285 |
+
## About The Project
|
| 286 |
+
|
| 287 |
+
We present a single-turn agent for graphical user interface (GUI) interaction tasks, using Vision-Language Model Florence-2-Base. Main goal of the agent is to click on desired UI element based on the screenshot and user command. It demonstrates strong performance on Screenspot and OmniAct, while maintaining a compact size of 0.27B parameters and minimal latency.
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
<!-- USAGE EXAMPLES -->
|
| 291 |
+
## Usage
|
| 292 |
+
To set up the environment for running the code, please refer to the [GitHub repository](https://github.com/SamsungLabs/TinyClick). All necessary libraries and dependencies are listed in the requirements.txt file
|
| 293 |
+
|
| 294 |
+
```python
|
| 295 |
+
from transformers import AutoModelForCausalLM, AutoProcessor
|
| 296 |
+
from PIL import Image
|
| 297 |
+
import requests
|
| 298 |
+
import torch
|
| 299 |
+
|
| 300 |
+
device = torch.device(""cuda"" if torch.cuda.is_available() else ""cpu"")
|
| 301 |
+
processor = AutoProcessor.from_pretrained(
|
| 302 |
+
""Samsung/TinyClick"", trust_remote_code=True
|
| 303 |
+
)
|
| 304 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 305 |
+
""Samsung/TinyClick"",
|
| 306 |
+
trust_remote_code=True,
|
| 307 |
+
).to(device)
|
| 308 |
+
|
| 309 |
+
url = ""https://huggingface.co/Samsung/TinyClick/resolve/main/sample.png""
|
| 310 |
+
img = Image.open(requests.get(url, stream=True).raw)
|
| 311 |
+
|
| 312 |
+
command = ""click on accept and continue button""
|
| 313 |
+
image_size = img.size
|
| 314 |
+
|
| 315 |
+
input_text = (""What to do to execute the command? "" + command.strip()).lower()
|
| 316 |
+
|
| 317 |
+
inputs = processor(
|
| 318 |
+
images=img,
|
| 319 |
+
text=input_text,
|
| 320 |
+
return_tensors=""pt"",
|
| 321 |
+
do_resize=True,
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
outputs = model.generate(**inputs)
|
| 325 |
+
generated_texts = processor.batch_decode(outputs, skip_special_tokens=False)
|
| 326 |
+
```
|
| 327 |
+
|
| 328 |
+
For postprocessing fuction go to our github repository: https://github.com/SamsungLabs/TinyClick
|
| 329 |
+
```python
|
| 330 |
+
from tinyclick_utils import postprocess
|
| 331 |
+
|
| 332 |
+
result = postprocess(generated_texts[0], image_size)
|
| 333 |
+
```
|
| 334 |
+
|
| 335 |
+
<!-- CITATION -->
|
| 336 |
+
## Citation
|
| 337 |
+
|
| 338 |
+
```
|
| 339 |
+
@misc{pawlowski2024tinyclicksingleturnagentempowering,
|
| 340 |
+
title={TinyClick: Single-Turn Agent for Empowering GUI Automation},
|
| 341 |
+
author={Pawel Pawlowski and Krystian Zawistowski and Wojciech Lapacz and Marcin Skorupa and Adam Wiacek and Sebastien Postansque and Jakub Hoscilowicz},
|
| 342 |
+
year={2024},
|
| 343 |
+
eprint={2410.11871},
|
| 344 |
+
archivePrefix={arXiv},
|
| 345 |
+
primaryClass={cs.HC},
|
| 346 |
+
url={https://arxiv.org/abs/2410.11871},
|
| 347 |
+
}
|
| 348 |
+
```
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
<!-- LICENSE -->
|
| 352 |
+
## License
|
| 353 |
+
|
| 354 |
+
Please check the MIT license that is listed in this repository. See `LICENSE` for more information.
|
| 355 |
+
|
| 356 |
+
<p align=""right"">(<a href=""#readme-top"">back to top</a>)</p>
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
<!-- MARKDOWN LINKS & IMAGES -->
|
| 360 |
+
[paper-shield]: https://img.shields.io/badge/2024-arXiv-red
|
| 361 |
+
[paper-url]: https://arxiv.org/abs/2410.11871
|
| 362 |
+
[license-shield]: https://img.shields.io/badge/License-MIT-yellow.svg
|
| 363 |
+
[license-url]: https://opensource.org/licenses/MIT
|
| 364 |
+
","{""id"": ""Samsung/TinyClick"", ""author"": ""Samsung"", ""sha"": ""a77fadb223603d17f91e0b96a0b24edf78be241b"", ""last_modified"": ""2024-10-29 10:39:53+00:00"", ""created_at"": ""2024-10-16 07:08:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 317, ""downloads_all_time"": null, ""likes"": 26, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""florence2"", ""custom_code"", ""arxiv:2410.11871"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Florence-2-base\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""microsoft/Florence-2-base--configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""microsoft/Florence-2-base--modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sample.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-29 10:39:53+00:00"", ""cardData"": ""base_model: microsoft/Florence-2-base\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""670f666f41b894977b77c492"", ""modelId"": ""Samsung/TinyClick"", ""usedStorage"": 1083916964}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Samsung/TinyClick&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSamsung%2FTinyClick%5D(%2FSamsung%2FTinyClick)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 365 |
+
AskUI/PTA-1,"---
|
| 366 |
+
library_name: transformers
|
| 367 |
+
pipeline_tag: image-text-to-text
|
| 368 |
+
tags:
|
| 369 |
+
- computer use
|
| 370 |
+
license: mit
|
| 371 |
+
language:
|
| 372 |
+
- en
|
| 373 |
+
base_model:
|
| 374 |
+
- microsoft/Florence-2-base
|
| 375 |
+
---
|
| 376 |
+
|
| 377 |
+
# PTA-1: Controlling Computers with Small Models
|
| 378 |
+
|
| 379 |
+
PTA (Prompt-to-Automation) is a vision language model for computer & phone automation, based on Florence-2.
|
| 380 |
+
With only 270M parameters it outperforms much larger models in GUI text and element localization.
|
| 381 |
+
This enables low-latency computer automation with local execution.
|
| 382 |
+
|
| 383 |
+
▶️ Try the demo at: [AskUI/PTA-1](https://huggingface.co/spaces/AskUI/PTA-1)
|
| 384 |
+
|
| 385 |
+
**Model Input:** Screenshot + description_of_target_element
|
| 386 |
+
|
| 387 |
+
**Model Output:** BoundingBox for Target Element
|
| 388 |
+
|
| 389 |
+

|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
## How to Get Started with the Model
|
| 393 |
+
|
| 394 |
+
Use the code below to get started with the model.
|
| 395 |
+
|
| 396 |
+
*Requirements:* torch, timm, einops, Pillow, transformers
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
```python
|
| 400 |
+
import torch
|
| 401 |
+
from PIL import Image
|
| 402 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 403 |
+
|
| 404 |
+
device = ""cuda:0"" if torch.cuda.is_available() else ""cpu""
|
| 405 |
+
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 406 |
+
|
| 407 |
+
model = AutoModelForCausalLM.from_pretrained(""AskUI/PTA-1"", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
|
| 408 |
+
processor = AutoProcessor.from_pretrained(""AskUI/PTA-1"", trust_remote_code=True)
|
| 409 |
+
|
| 410 |
+
task_prompt = ""<OPEN_VOCABULARY_DETECTION>""
|
| 411 |
+
prompt = task_prompt + ""description of the target element""
|
| 412 |
+
|
| 413 |
+
image = Image.open(""path to screenshot"").convert(""RGB"")
|
| 414 |
+
|
| 415 |
+
inputs = processor(text=prompt, images=image, return_tensors=""pt"").to(device, torch_dtype)
|
| 416 |
+
|
| 417 |
+
generated_ids = model.generate(
|
| 418 |
+
input_ids=inputs[""input_ids""],
|
| 419 |
+
pixel_values=inputs[""pixel_values""],
|
| 420 |
+
max_new_tokens=1024,
|
| 421 |
+
do_sample=False,
|
| 422 |
+
num_beams=3,
|
| 423 |
+
)
|
| 424 |
+
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
| 425 |
+
|
| 426 |
+
parsed_answer = processor.post_process_generation(generated_text, task=""<OPEN_VOCABULARY_DETECTION>"", image_size=(image.width, image.height))
|
| 427 |
+
|
| 428 |
+
print(parsed_answer)
|
| 429 |
+
```
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
## Evaluation
|
| 433 |
+
|
| 434 |
+
**Note:** This is a first version of our evaluation, based on 999 samples (333 samples from each dataset).
|
| 435 |
+
We are still running all models on the full test sets, and we are seeing ±5% deviations for a subset of the models we have already evaluated.
|
| 436 |
+
|
| 437 |
+
| Model | Parameters | Mean | agentsea/wave-ui | AskUI/pta-text | ivelin/rico_refexp_combined |
|
| 438 |
+
|--------------------------------------------|------------|--------|------------------|----------------|-----------------------------|
|
| 439 |
+
| AskUI/PTA-1 | 0.27B | 79.98 | 90.69* | 76.28 | 72.97* |
|
| 440 |
+
| anthropic.claude-3-5-sonnet-20241022-v2:0 | - | 70.37 | 82.28 | 83.18 | 45.65 |
|
| 441 |
+
| agentsea/paligemma-3b-ft-waveui-896 | 3.29B | 57.76 | 70.57* | 67.87 | 34.83 |
|
| 442 |
+
| Qwen/Qwen2-VL-7B-Instruct | 8.29B | 57.26 | 47.45 | 60.66 | 63.66 |
|
| 443 |
+
| agentsea/paligemma-3b-ft-widgetcap-waveui-448 | 3.29B | 53.15 | 74.17* | 53.45 | 31.83 |
|
| 444 |
+
| microsoft/Florence-2-base | 0.27B | 39.44 | 22.22 | 81.38 | 14.71 |
|
| 445 |
+
| microsoft/Florence-2-large | 0.82B | 36.64 | 14.11 | 81.98 | 13.81 |
|
| 446 |
+
| EasyOCR | - | 29.43 | 3.9 | 75.08 | 9.31 |
|
| 447 |
+
| adept/fuyu-8b | 9.41B | 26.83 | 5.71 | 71.47 | 3.3 |
|
| 448 |
+
| Qwen/Qwen2-VL-2B-Instruct | 2.21B | 23.32 | 17.12 | 26.13 | 26.73 |
|
| 449 |
+
| Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4 | 0.90B | 18.92 | 10.81 | 22.82 | 23.12 |
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
\* Models is known to be trained on the train split of that dataset.
|
| 453 |
+
|
| 454 |
+
The high benchmark scores for our model are partially due to data bias.
|
| 455 |
+
Therefore, we expect users of the model to fine-tune it according to the data distributions of their use case.
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
#### Metrics
|
| 459 |
+
|
| 460 |
+
Click success rate is calculated as the number of clicks inside the target bounding box relative to all clicks.
|
| 461 |
+
If a model predicts a target bounding box instead of a click coordinate, its center is used as its click prediction.","{""id"": ""AskUI/PTA-1"", ""author"": ""AskUI"", ""sha"": ""cf9877cde0d7c75b60b4d699f359922a848ee3ec"", ""last_modified"": ""2024-11-28 09:30:35+00:00"", ""created_at"": ""2024-11-13 09:33:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7008, ""downloads_all_time"": null, ""likes"": 88, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""computer use"", ""image-text-to-text"", ""custom_code"", ""en"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Florence-2-base\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- computer use"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/examples.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""AskUI/PTA-1""], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-28 09:30:35+00:00"", ""cardData"": ""base_model:\n- microsoft/Florence-2-base\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- computer use"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6734725be6a45b6a0bac5046"", ""modelId"": ""AskUI/PTA-1"", ""usedStorage"": 
2167833928}",1,,0,,0,,0,,0,"AskUI/PTA-1, huggingface/InferenceSupport/discussions/new?title=AskUI/PTA-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAskUI%2FPTA-1%5D(%2FAskUI%2FPTA-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
| 462 |
+
PJMixers-Images/Florence-2-base-danbooru2022-316k,"---
|
| 463 |
+
datasets:
|
| 464 |
+
- animelover/danbooru2022
|
| 465 |
+
base_model:
|
| 466 |
+
- microsoft/Florence-2-base
|
| 467 |
+
---
|
| 468 |
+
This model serves as a proof of concept. You will *very likely* have better captioning results using [`SmilingWolf/wd-eva02-large-tagger-v3`](https://huggingface.co/SmilingWolf/wd-eva02-large-tagger-v3).
|
| 469 |
+
|
| 470 |
+
Trained with [Florence-2ner](https://github.com/xzuyn/Florence-2ner) using this config and 316K images from the [`animelover/danbooru2022` dataset](https://huggingface.co/datasets/animelover/danbooru2022) (`data-0880.zip` to `data-0943.zip`).
|
| 471 |
+
|
| 472 |
+
```json
|
| 473 |
+
{
|
| 474 |
+
""model_name"": ""microsoft/Florence-2-base"",
|
| 475 |
+
""dataset_path"": ""./0000_Datasets/danbooru2022"",
|
| 476 |
+
""run_name"": ""Florence-2-base-danbooru2022-316k-run1"",
|
| 477 |
+
""epochs"": 1,
|
| 478 |
+
""learning_rate"": 1e-5,
|
| 479 |
+
""gradient_checkpointing"": true,
|
| 480 |
+
""freeze_vision"": false,
|
| 481 |
+
""freeze_language"": false,
|
| 482 |
+
""freeze_other"": false,
|
| 483 |
+
""train_batch_size"": 8,
|
| 484 |
+
""eval_batch_size"": 16,
|
| 485 |
+
""gradient_accumulation_steps"": 32,
|
| 486 |
+
""clip_grad_norm"": 1,
|
| 487 |
+
""weight_decay"": 1e-5,
|
| 488 |
+
""save_total_limit"": 3,
|
| 489 |
+
""save_steps"": 50,
|
| 490 |
+
""eval_steps"": 50,
|
| 491 |
+
""warmup_steps"": 50,
|
| 492 |
+
""eval_split_ratio"": 0.01,
|
| 493 |
+
""seed"": 42,
|
| 494 |
+
""filtering_processes"": 128,
|
| 495 |
+
""attn_implementation"": ""sdpa""
|
| 496 |
+
}
|
| 497 |
+
```
|
| 498 |
+
|
| 499 |
+
","{""id"": ""PJMixers-Images/Florence-2-base-danbooru2022-316k"", ""author"": ""PJMixers-Images"", ""sha"": ""567db1d70709210240354c4ee46f44db1a24f337"", ""last_modified"": ""2025-01-08 18:31:32+00:00"", ""created_at"": ""2025-01-07 14:13:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""florence2"", ""custom_code"", ""dataset:animelover/danbooru2022"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- animelover/danbooru2022"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""microsoft/Florence-2-base--configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""microsoft/Florence-2-base--modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='val_loss.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-08 18:31:32+00:00"", ""cardData"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- animelover/danbooru2022"", ""transformersInfo"": null, ""_id"": ""677d3686c02ec1513943e26a"", ""modelId"": ""PJMixers-Images/Florence-2-base-danbooru2022-316k"", ""usedStorage"": 1626006582}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PJMixers-Images/Florence-2-base-danbooru2022-316k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPJMixers-Images%2FFlorence-2-base-danbooru2022-316k%5D(%2FPJMixers-Images%2FFlorence-2-base-danbooru2022-316k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 500 |
+
https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.4,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 501 |
+
https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.5,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 502 |
+
maxiw/Florence-2-ScreenQA-base,"---
|
| 503 |
+
library_name: transformers
|
| 504 |
+
language:
|
| 505 |
+
- en
|
| 506 |
+
pipeline_tag: image-text-to-text
|
| 507 |
+
datasets:
|
| 508 |
+
- rootsautomation/RICO-ScreenQA
|
| 509 |
+
base_model: microsoft/Florence-2-base
|
| 510 |
+
---
|
| 511 |
+
|
| 512 |
+
# Florence-2-ScreenQA-base
|
| 513 |
+
|
| 514 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 515 |
+
|
| 516 |
+
This is fine-tuned version of [microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base) on [RICO-ScreenQA](https://huggingface.co/datasets/rootsautomation/RICO-ScreenQA). It can be used to extract information from screenshots.
|
| 517 |
+
|
| 518 |
+
## Model Details
|
| 519 |
+
|
| 520 |
+
### Model Description
|
| 521 |
+
|
| 522 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 523 |
+
|
| 524 |
+
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
|
| 525 |
+
|
| 526 |
+
- **Developed by:** [More Information Needed]
|
| 527 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 528 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 529 |
+
- **Model type:** [More Information Needed]
|
| 530 |
+
- **Finetuned from model:** [microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base)
|
| 531 |
+
|
| 532 |
+
### Model Sources [optional]
|
| 533 |
+
|
| 534 |
+
<!-- Provide the basic links for the model. -->
|
| 535 |
+
|
| 536 |
+
- **Repository:** [More Information Needed]
|
| 537 |
+
- **Demo:** [HF Space](https://huggingface.co/spaces/maxiw/Florence-2-ScreenQA)
|
| 538 |
+
|
| 539 |
+
## How to Get Started with the Model
|
| 540 |
+
|
| 541 |
+
Use the code below to get started with the model.
|
| 542 |
+
|
| 543 |
+
[More Information Needed]
|
| 544 |
+
|
| 545 |
+
## Training Details
|
| 546 |
+
|
| 547 |
+
### Training Data
|
| 548 |
+
|
| 549 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 550 |
+
|
| 551 |
+
[More Information Needed]
|
| 552 |
+
|
| 553 |
+
### Training Procedure
|
| 554 |
+
|
| 555 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 556 |
+
|
| 557 |
+
#### Preprocessing [optional]
|
| 558 |
+
|
| 559 |
+
[More Information Needed]
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
#### Training Hyperparameters
|
| 563 |
+
|
| 564 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 565 |
+
|
| 566 |
+
#### Speeds, Sizes, Times [optional]
|
| 567 |
+
|
| 568 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 569 |
+
|
| 570 |
+
[More Information Needed]","{""id"": ""maxiw/Florence-2-ScreenQA-base"", ""author"": ""maxiw"", ""sha"": ""2f8114d9238c25796cb23805a2d9609beb52c2d8"", ""last_modified"": ""2024-08-21 09:06:12+00:00"", ""created_at"": ""2024-08-11 11:15:59+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""image-text-to-text"", ""custom_code"", ""en"", ""dataset:rootsautomation/RICO-ScreenQA"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Florence-2-base\ndatasets:\n- rootsautomation/RICO-ScreenQA\nlanguage:\n- en\nlibrary_name: transformers\npipeline_tag: image-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""maxiw/Florence-2-ScreenQA""], ""safetensors"": {""parameters"": {""F16"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-21 09:06:12+00:00"", ""cardData"": ""base_model: microsoft/Florence-2-base\ndatasets:\n- rootsautomation/RICO-ScreenQA\nlanguage:\n- en\nlibrary_name: transformers\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66b89d6f088299999a778769"", ""modelId"": ""maxiw/Florence-2-ScreenQA-base"", ""usedStorage"": 
542001522}",1,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=maxiw/Florence-2-ScreenQA-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmaxiw%2FFlorence-2-ScreenQA-base%5D(%2Fmaxiw%2FFlorence-2-ScreenQA-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, maxiw/Florence-2-ScreenQA",2
|
| 571 |
+
gokaygokay/Florence-2-Flux,"---
|
| 572 |
+
license: apache-2.0
|
| 573 |
+
language:
|
| 574 |
+
- en
|
| 575 |
+
library_name: transformers
|
| 576 |
+
pipeline_tag: image-text-to-text
|
| 577 |
+
tags:
|
| 578 |
+
- art
|
| 579 |
+
base_model: microsoft/Florence-2-base
|
| 580 |
+
datasets:
|
| 581 |
+
- kadirnar/fluxdev_controlnet_16k
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
```
|
| 585 |
+
pip install -q torch==2.4.0 datasets flash_attn timm einops
|
| 586 |
+
```
|
| 587 |
+
|
| 588 |
+
```python
|
| 589 |
+
|
| 590 |
+
from transformers import AutoModelForCausalLM, AutoProcessor, AutoConfig
|
| 591 |
+
import torch
|
| 592 |
+
|
| 593 |
+
device = torch.device(""cuda"" if torch.cuda.is_available() else ""cpu"")
|
| 594 |
+
|
| 595 |
+
model = AutoModelForCausalLM.from_pretrained(""gokaygokay/Florence-2-Flux"", trust_remote_code=True).to(device).eval()
|
| 596 |
+
processor = AutoProcessor.from_pretrained(""gokaygokay/Florence-2-Flux"", trust_remote_code=True)
|
| 597 |
+
|
| 598 |
+
# Function to run the model on an example
|
| 599 |
+
def run_example(task_prompt, text_input, image):
|
| 600 |
+
prompt = task_prompt + text_input
|
| 601 |
+
|
| 602 |
+
# Ensure the image is in RGB mode
|
| 603 |
+
if image.mode != ""RGB"":
|
| 604 |
+
image = image.convert(""RGB"")
|
| 605 |
+
|
| 606 |
+
inputs = processor(text=prompt, images=image, return_tensors=""pt"").to(device)
|
| 607 |
+
generated_ids = model.generate(
|
| 608 |
+
input_ids=inputs[""input_ids""],
|
| 609 |
+
pixel_values=inputs[""pixel_values""],
|
| 610 |
+
max_new_tokens=1024,
|
| 611 |
+
num_beams=3,
|
| 612 |
+
repetition_penalty=1.10,
|
| 613 |
+
)
|
| 614 |
+
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
| 615 |
+
parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
|
| 616 |
+
return parsed_answer
|
| 617 |
+
|
| 618 |
+
from PIL import Image
|
| 619 |
+
import requests
|
| 620 |
+
import copy
|
| 621 |
+
|
| 622 |
+
url = ""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true""
|
| 623 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
| 624 |
+
answer = run_example(""<DESCRIPTION>"", ""Describe this image in great detail."", image)
|
| 625 |
+
|
| 626 |
+
final_answer = answer[""<DESCRIPTION>""]
|
| 627 |
+
print(final_answer)
|
| 628 |
+
|
| 629 |
+
```","{""id"": ""gokaygokay/Florence-2-Flux"", ""author"": ""gokaygokay"", ""sha"": ""d17454c4e4bccb3bfe52477634089b8515b1bc3c"", ""last_modified"": ""2024-10-27 14:43:18+00:00"", ""created_at"": ""2024-08-23 20:42:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2064, ""downloads_all_time"": null, ""likes"": 13, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""art"", ""image-text-to-text"", ""custom_code"", ""en"", ""dataset:kadirnar/fluxdev_controlnet_16k"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Florence-2-base\ndatasets:\n- kadirnar/fluxdev_controlnet_16k\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- art"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_florence2.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_florence2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""John6666/danbooru-tags-transformer-v2-with-wd-tagger"", ""John6666/Prompt-Enhancer"", ""gokaygokay/Flux-Florence-2"", ""John6666/danbooru-tags-transformer-v2-with-wd-tagger-b"", ""Heartsync/FLUX-Vision"", ""FiditeNemini/Prompt-Enhancer"", ""haroldooo/danbooru-tags-transformer-v2-with-wd-tagger"", ""vinhtruong3/florence-captioning-2-txt"", ""ginipick/Flux-Florence-2"", ""saepulid/bongkar-prompt"", ""FiditeNemini/danbooru-tags-transformer-v2-with-wd-tagger"", ""creaturebot/danbooru-tags-transformer-v2-with-wd-tagger"", ""LMFResearchSociety/danbooru-tags-transformer-v2-with-wd-tagger-b""], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-27 14:43:18+00:00"", ""cardData"": ""base_model: microsoft/Florence-2-base\ndatasets:\n- kadirnar/fluxdev_controlnet_16k\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- art"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66c8f42ad2d84bde3ccffca4"", ""modelId"": ""gokaygokay/Florence-2-Flux"", ""usedStorage"": 
1083916964}",1,,0,,0,,0,,0,"FiditeNemini/Prompt-Enhancer, Heartsync/FLUX-Vision, John6666/Prompt-Enhancer, John6666/danbooru-tags-transformer-v2-with-wd-tagger, John6666/danbooru-tags-transformer-v2-with-wd-tagger-b, LMFResearchSociety/danbooru-tags-transformer-v2-with-wd-tagger-b, creaturebot/danbooru-tags-transformer-v2-with-wd-tagger, ginipick/Flux-Florence-2, gokaygokay/Flux-Florence-2, haroldooo/danbooru-tags-transformer-v2-with-wd-tagger, huggingface/InferenceSupport/discussions/new?title=gokaygokay/Florence-2-Flux&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgokaygokay%2FFlorence-2-Flux%5D(%2Fgokaygokay%2FFlorence-2-Flux)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, saepulid/bongkar-prompt, vinhtruong3/florence-captioning-2-txt",13
|
| 630 |
+
sahilnishad/Florence-2-FT-DocVQA,"---
|
| 631 |
+
library_name: transformers
|
| 632 |
+
license: mit
|
| 633 |
+
datasets:
|
| 634 |
+
- HuggingFaceM4/DocumentVQA
|
| 635 |
+
language:
|
| 636 |
+
- en
|
| 637 |
+
base_model:
|
| 638 |
+
- microsoft/Florence-2-base
|
| 639 |
+
tags:
|
| 640 |
+
- transformers
|
| 641 |
+
- florence2
|
| 642 |
+
- document-vqa
|
| 643 |
+
- vqa
|
| 644 |
+
- image-to-text
|
| 645 |
+
- multimodal
|
| 646 |
+
- question-answering
|
| 647 |
+
---
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
# Model Description
|
| 651 |
+
Fine-tuned Florence-2 model on DocumentVQA dataset to perform question answering on document images
|
| 652 |
+
- **[Github](https://github.com/sahilnishad/Fine-Tuning-Florence-2-DocumentVQA)**
|
| 653 |
+
|
| 654 |
+
# Get Started with the Model
|
| 655 |
+
#### 1. Installation
|
| 656 |
+
```python
|
| 657 |
+
!pip install torch transformers datasets flash_attn
|
| 658 |
+
```
|
| 659 |
+
#### 2. Loading model and processor
|
| 660 |
+
```python
|
| 661 |
+
import torch
|
| 662 |
+
from transformers import AutoModelForCausalLM, AutoProcessor
|
| 663 |
+
|
| 664 |
+
model = AutoModelForCausalLM.from_pretrained(""sahilnishad/Florence-2-FT-DocVQA"", trust_remote_code=True)
|
| 665 |
+
processor = AutoProcessor.from_pretrained(""sahilnishad/Florence-2-FT-DocVQA"", trust_remote_code=True)
|
| 666 |
+
device = torch.device(""cuda"" if torch.cuda.is_available() else ""cpu"")
|
| 667 |
+
model.to(device)
|
| 668 |
+
```
|
| 669 |
+
#### 3. Running inference
|
| 670 |
+
```python
|
| 671 |
+
def run_inference(task_prompt, question, image):
|
| 672 |
+
prompt = task_prompt + question
|
| 673 |
+
|
| 674 |
+
if image.mode != ""RGB"":
|
| 675 |
+
image = image.convert(""RGB"")
|
| 676 |
+
|
| 677 |
+
inputs = processor(text=prompt, images=image, return_tensors=""pt"").to(device)
|
| 678 |
+
|
| 679 |
+
with torch.no_grad():
|
| 680 |
+
generated_ids = model.generate(
|
| 681 |
+
input_ids=inputs[""input_ids""],
|
| 682 |
+
pixel_values=inputs[""pixel_values""],
|
| 683 |
+
max_new_tokens=1024,
|
| 684 |
+
num_beams=3
|
| 685 |
+
)
|
| 686 |
+
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 687 |
+
return generated_text
|
| 688 |
+
```
|
| 689 |
+
#### 4. Example
|
| 690 |
+
```python
|
| 691 |
+
from PIL import Image
|
| 692 |
+
from datasets import load_dataset
|
| 693 |
+
|
| 694 |
+
data = load_dataset(""HuggingFaceM4/DocumentVQA"")
|
| 695 |
+
|
| 696 |
+
question = ""What do you see in this image?""
|
| 697 |
+
image = data['train'][0]['image']
|
| 698 |
+
print(run_inference(""<DocVQA>"", question, image))
|
| 699 |
+
```
|
| 700 |
+
---
|
| 701 |
+
|
| 702 |
+
# BibTeX:
|
| 703 |
+
```bibtex
|
| 704 |
+
@misc{sahilnishad_florence_2_ft_docvqa,
|
| 705 |
+
author = {Sahil Nishad},
|
| 706 |
+
title = {Fine-Tuning Florence-2 For Document Visual Question-Answering},
|
| 707 |
+
year = {2024},
|
| 708 |
+
url = {https://huggingface.co/sahilnishad/Florence-2-FT-DocVQA},
|
| 709 |
+
note = {Model available on HuggingFace Hub},
|
| 710 |
+
howpublished = {\url{https://huggingface.co/sahilnishad/Florence-2-FT-DocVQA}},
|
| 711 |
+
}","{""id"": ""sahilnishad/Florence-2-FT-DocVQA"", ""author"": ""sahilnishad"", ""sha"": ""9add8e14fe891201e169d28a648a08172bd28448"", ""last_modified"": ""2024-11-07 14:05:58+00:00"", ""created_at"": ""2024-11-02 19:40:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4889, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""document-vqa"", ""vqa"", ""image-to-text"", ""multimodal"", ""question-answering"", ""custom_code"", ""en"", ""dataset:HuggingFaceM4/DocumentVQA"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""doi:10.57967/hf/3473"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- HuggingFaceM4/DocumentVQA\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- transformers\n- florence2\n- document-vqa\n- vqa\n- image-to-text\n- multimodal\n- question-answering"", ""widget_data"": [{""text"": ""Where do I live?"", ""context"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""Where do I live?"", ""context"": ""My name is Sarah and I live in London""}, {""text"": ""What's my name?"", ""context"": ""My name is Clara and I live in Berkeley.""}, {""text"": ""Which name is also used to describe the Amazon rainforest in English?"", ""context"": ""The Amazon rainforest (Portuguese: Floresta Amaz\u00f4nica or Amaz\u00f4nia; Spanish: Selva Amaz\u00f3nica, Amazon\u00eda or usually Amazonia; French: For\u00eat amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. 
This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain \""Amazonas\"" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.""}], ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""microsoft/Florence-2-base-ft--configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""sahilnishad/scanned-doc-chat""], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-07 14:05:58+00:00"", ""cardData"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- HuggingFaceM4/DocumentVQA\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- transformers\n- florence2\n- document-vqa\n- vqa\n- image-to-text\n- multimodal\n- question-answering"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""672680373a882a0daa256dc7"", ""modelId"": ""sahilnishad/Florence-2-FT-DocVQA"", ""usedStorage"": 1083916964}",1,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=sahilnishad/Florence-2-FT-DocVQA&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsahilnishad%2FFlorence-2-FT-DocVQA%5D(%2Fsahilnishad%2FFlorence-2-FT-DocVQA)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sahilnishad/scanned-doc-chat",2
|
| 712 |
+
https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.1,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 713 |
+
https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.2,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 714 |
+
alakxender/florence-base-dv01-01,"---
|
| 715 |
+
library_name: transformers
|
| 716 |
+
language:
|
| 717 |
+
- dv
|
| 718 |
+
base_model:
|
| 719 |
+
- microsoft/Florence-2-base
|
| 720 |
+
datasets:
|
| 721 |
+
- alakxender/dhivehi-image-text
|
| 722 |
+
---
|
| 723 |
+
|
| 724 |
+
# Florence Dhivehi
|
| 725 |
+
|
| 726 |
+
Finetuned florence2-base on dhivehi-text-image data, config dv01 only","{""id"": ""alakxender/florence-base-dv01-01"", ""author"": ""alakxender"", ""sha"": ""02dd2bf628da5ffeb5b298e3521f2d68eb150877"", ""last_modified"": ""2025-02-07 13:31:31+00:00"", ""created_at"": ""2025-01-27 15:10:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""custom_code"", ""dv"", ""dataset:alakxender/dhivehi-image-text"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- alakxender/dhivehi-image-text\nlanguage:\n- dv\nlibrary_name: transformers"", ""widget_data"": [{""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u0794\u07ab\u0790\u07aa\u078a\u07b0 \u0787\u07a6\u078b\u07a8 \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0789\u07a6\u0787\u07a8\u078e\u07a6\u0782\u0791\u07aa""}, {""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u0789\u07a6\u0783\u07a8\u0787\u07a6\u0789\u07b0\u060c \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0 \u0787\u07ac\u0782\u07b0\u0789\u07ac \u078e\u07a6\u0794\u07a7\u0788\u07a7""}, {""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u078a\u07a7\u078c\u07aa\u0789\u07a6\u078c\u07aa \u0787\u07a6\u078b\u07a8 \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0""}, {""text"": ""\u060c\u0787\u07ac\u0787\u07b0 
\u0792\u07a6\u0789\u07a7\u0782\u07ac\u0787\u07b0\u078e\u07a6\u0787\u07a8""}], ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""microsoft/Florence-2-base-ft--configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2025-02-07 13:31:31+00:00"", ""cardData"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- alakxender/dhivehi-image-text\nlanguage:\n- dv\nlibrary_name: transformers"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6797a1fbe0bae7ff70123271"", ""modelId"": ""alakxender/florence-base-dv01-01"", ""usedStorage"": 1083916964}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=alakxender/florence-base-dv01-01&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Balakxender%2Fflorence-base-dv01-01%5D(%2Falakxender%2Fflorence-base-dv01-01)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 727 |
+
alakxender/florence-base-dv01-01-dv-e10,"---
|
| 728 |
+
library_name: transformers
|
| 729 |
+
language:
|
| 730 |
+
- dv
|
| 731 |
+
base_model:
|
| 732 |
+
- microsoft/Florence-2-base
|
| 733 |
+
datasets:
|
| 734 |
+
- alakxender/dhivehi-image-text
|
| 735 |
+
---
|
| 736 |
+
|
| 737 |
+
# Florence Dhivehi
|
| 738 |
+
|
| 739 |
+
Finetuned florence2-base on dhivehi-text-image data","{""id"": ""alakxender/florence-base-dv01-01-dv-e10"", ""author"": ""alakxender"", ""sha"": ""38a03d33583f8319836374d0d049737ffe07d83b"", ""last_modified"": ""2025-02-07 13:30:48+00:00"", ""created_at"": ""2025-01-28 12:25:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""florence2"", ""text-generation"", ""custom_code"", ""dv"", ""dataset:alakxender/dhivehi-image-text"", ""base_model:microsoft/Florence-2-base"", ""base_model:finetune:microsoft/Florence-2-base"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- alakxender/dhivehi-image-text\nlanguage:\n- dv\nlibrary_name: transformers"", ""widget_data"": [{""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u0794\u07ab\u0790\u07aa\u078a\u07b0 \u0787\u07a6\u078b\u07a8 \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0789\u07a6\u0787\u07a8\u078e\u07a6\u0782\u0791\u07aa""}, {""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u0789\u07a6\u0783\u07a8\u0787\u07a6\u0789\u07b0\u060c \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0 \u0787\u07ac\u0782\u07b0\u0789\u07ac \u078e\u07a6\u0794\u07a7\u0788\u07a7""}, {""text"": ""\u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0\u078e\u07ac \u0782\u07a6\u0789\u07a6\u0786\u07a9 \u078a\u07a7\u078c\u07aa\u0789\u07a6\u078c\u07aa \u0787\u07a6\u078b\u07a8 \u0787\u07a6\u0780\u07a6\u0783\u07ac\u0782\u07b0""}, {""text"": ""\u060c\u0787\u07ac\u0787\u07b0 
\u0792\u07a6\u0789\u07a7\u0782\u07ac\u0787\u07b0\u078e\u07a6\u0787\u07a8""}], ""model_index"": null, ""config"": {""architectures"": [""Florence2ForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""microsoft/Florence-2-base-ft--configuration_florence2.Florence2Config"", ""AutoModelForCausalLM"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration""}, ""model_type"": ""florence2"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 270957657}, ""total"": 270957657}, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2025-02-07 13:30:48+00:00"", ""cardData"": ""base_model:\n- microsoft/Florence-2-base\ndatasets:\n- alakxender/dhivehi-image-text\nlanguage:\n- dv\nlibrary_name: transformers"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Florence-2-base-ft--modeling_florence2.Florence2ForConditionalGeneration"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6798ccd602ff123f682bf8d7"", ""modelId"": ""alakxender/florence-base-dv01-01-dv-e10"", ""usedStorage"": 1083916964}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=alakxender/florence-base-dv01-01-dv-e10&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Balakxender%2Fflorence-base-dv01-01-dv-e10%5D(%2Falakxender%2Fflorence-base-dv01-01-dv-e10)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Ghibli-Diffusion_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nitrosocke/Ghibli-Diffusion,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: creativeml-openrail-m
|
| 6 |
+
thumbnail: ""https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg""
|
| 7 |
+
tags:
|
| 8 |
+
- stable-diffusion
|
| 9 |
+
- text-to-image
|
| 10 |
+
- image-to-image
|
| 11 |
+
- diffusers
|
| 12 |
+
|
| 13 |
+
---
|
| 14 |
+
### Ghibli Diffusion
|
| 15 |
+
|
| 16 |
+
This is the fine-tuned Stable Diffusion model trained on images from modern anime feature films from Studio Ghibli.
|
| 17 |
+
Use the tokens **_ghibli style_** in your prompts for the effect.
|
| 18 |
+
|
| 19 |
+
**If you enjoy my work and want to test new models before release, please consider supporting me**
|
| 20 |
+
[](https://patreon.com/user?u=79196446)
|
| 21 |
+
|
| 22 |
+
**Characters rendered with the model:**
|
| 23 |
+

|
| 24 |
+
**Cars and Animals rendered with the model:**
|
| 25 |
+

|
| 26 |
+
**Landscapes rendered with the model:**
|
| 27 |
+

|
| 28 |
+
_ghibli style beautiful Caribbean beach tropical (sunset) - Negative prompt: soft blurry_
|
| 29 |
+

|
| 30 |
+
_ghibli style ice field white mountains ((northern lights)) starry sky low horizon - Negative prompt: soft blurry_
|
| 31 |
+
|
| 32 |
+
#### Prompt and settings for the Strom Trooper:
|
| 33 |
+
**ghibli style (storm trooper) Negative prompt: (bad anatomy)**
|
| 34 |
+
_Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3450349066, Size: 512x704_
|
| 35 |
+
|
| 36 |
+
#### Prompt and settings for the VW Beetle:
|
| 37 |
+
**ghibli style VW beetle Negative prompt: soft blurry**
|
| 38 |
+
_Steps: 30, Sampler: Euler a, CFG scale: 7, Seed: 1529856912, Size: 704x512_
|
| 39 |
+
|
| 40 |
+
This model was trained using the diffusers based dreambooth training by ShivamShrirao using prior-preservation loss and the _train-text-encoder_ flag in 15.000 steps.
|
| 41 |
+
|
| 42 |
+
<!-- ### Gradio
|
| 43 |
+
|
| 44 |
+
We support a [Gradio](https://github.com/gradio-app/gradio) Web UI run redshift-diffusion:
|
| 45 |
+
[](https://huggingface.co/spaces/nitrosocke/Ghibli-Diffusion-Demo)-->
|
| 46 |
+
|
| 47 |
+
### 🧨 Diffusers
|
| 48 |
+
|
| 49 |
+
This model can be used just like any other Stable Diffusion model. For more information,
|
| 50 |
+
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
|
| 51 |
+
|
| 52 |
+
You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
|
| 53 |
+
|
| 54 |
+
```python
|
| 55 |
+
from diffusers import StableDiffusionPipeline
|
| 56 |
+
import torch
|
| 57 |
+
|
| 58 |
+
model_id = ""nitrosocke/Ghibli-Diffusion""
|
| 59 |
+
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 60 |
+
pipe = pipe.to(""cuda"")
|
| 61 |
+
|
| 62 |
+
prompt = ""ghibli style magical princess with golden hair""
|
| 63 |
+
image = pipe(prompt).images[0]
|
| 64 |
+
|
| 65 |
+
image.save(""./magical_princess.png"")
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## License
|
| 69 |
+
|
| 70 |
+
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
|
| 71 |
+
The CreativeML OpenRAIL License specifies:
|
| 72 |
+
|
| 73 |
+
1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
|
| 74 |
+
2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
|
| 75 |
+
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
|
| 76 |
+
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)","{""id"": ""nitrosocke/Ghibli-Diffusion"", ""author"": ""nitrosocke"", ""sha"": ""7600fd7538c9030fb60fa7bca17bf048c86466b3"", ""last_modified"": ""2023-08-03 19:46:59+00:00"", ""created_at"": ""2022-11-18 15:50:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 122591, ""downloads_all_time"": null, ""likes"": 760, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""text-to-image"", ""image-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ghibli-diffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-01s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-02s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-03s.jpg', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='images/ghibli-diffusion-samples-04s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-thumbnail.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/test.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": 
[""radames/Real-Time-Latent-Consistency-Model"", ""Yntec/ToyWorld"", ""radames/Real-Time-Latent-Consistency-Model-Text-To-Image"", ""r3gm/DiffuseCraft"", ""John6666/DiffuseCraftMod"", ""Yntec/PrintingPress"", ""radames/Real-Time-SD-Turbo"", ""radames/real-time-pix2pix-turbo"", ""John6666/votepurchase-multiple-model"", ""latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""fffiloni/ControlVideo"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""allknowingroger/Image-Models-Test59"", ""Menyu/DiffuseCraftMod"", ""daniissac/ghibli"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""IgorSense/Diffusion_Space2"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""bobber/DiffuseCraft"", ""chrisjayden/gh-diffusion"", ""akhaliq/Ghibli-Diffusion"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Lwight/Ghibli-Diffusion"", ""Harshveer/Finetuned_Diffusion_Max"", 
""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""Yabo/ControlVideo"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PeepDaSlan9/B2BMGMT_nitrosocke-Ghibli-Diffusion"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""multimodalart/Real-Time-Latent-SDXL-Lightning"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""VirtuRoa/ghibli"", ""catontheturntable/Ghibli-Diffusion"", ""herberthe/nitrosocke-Ghibli-Diffusion"", ""RedYan/nitrosocke-Ghibli-Diffusion"", ""bradarrML/Diffusion_Space"", ""koustubhavachat/Ghibli-Diffusion"", ""ygtrfed/pp-web-ui"", ""Xinxiang0820/nitrosocke-Ghibli-Diffusion"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""Mileena/Diffusion_Space2-Styles"", ""ennov8ion/Landscapes-models"", ""Shad0ws/ImageModelTestEnvironment"", ""GorroRojo/nitrosocke-Ghibli-Diffusion"", ""sohoso/anime348756"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""afasdfas/cringe_model"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""Festrcze/Real-Time-SD-Turbooooooo"", ""vih-v/x_mod"", ""John6666/testvp""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-03 19:46:59+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg"", ""transformersInfo"": null, ""_id"": ""6377a9dbf5fe4a39f7839d40"", ""modelId"": ""nitrosocke/Ghibli-Diffusion"", ""usedStorage"": 20376154998}",0,https://huggingface.co/elanoqi/wb-ghibli-800,1,,0,https://huggingface.co/RanaLLC/Ghibli-Diffusion-onnx-fp16,1,https://huggingface.co/Yntec/Ghibli,1,"CompVis/stable-diffusion-license, 
Daniela-C/6x_Image_diffusion, John6666/DiffuseCraftMod, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/votepurchase-multiple-model, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=nitrosocke/Ghibli-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnitrosocke%2FGhibli-Diffusion%5D(%2Fnitrosocke%2FGhibli-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld, r3gm/DiffuseCraft",14
|
| 77 |
+
elanoqi/wb-ghibli-800,"
|
| 78 |
+
---
|
| 79 |
+
license: creativeml-openrail-m
|
| 80 |
+
base_model: nitrosocke/Ghibli-Diffusion
|
| 81 |
+
instance_prompt: a photo of jksj waist bag
|
| 82 |
+
tags:
|
| 83 |
+
- stable-diffusion
|
| 84 |
+
- stable-diffusion-diffusers
|
| 85 |
+
- text-to-image
|
| 86 |
+
- diffusers
|
| 87 |
+
- dreambooth
|
| 88 |
+
inference: true
|
| 89 |
+
---
|
| 90 |
+
|
| 91 |
+
# DreamBooth - elanoqi/wb-ghibli-800
|
| 92 |
+
|
| 93 |
+
This is a dreambooth model derived from nitrosocke/Ghibli-Diffusion. The weights were trained on a photo of jksj waist bag using [DreamBooth](https://dreambooth.github.io/).
|
| 94 |
+
You can find some example images in the following.
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
DreamBooth for the text encoder was enabled: False.
|
| 99 |
+
","{""id"": ""elanoqi/wb-ghibli-800"", ""author"": ""elanoqi"", ""sha"": ""d3daf31284f473d7cd9bc1fa78bdcd1c91c2fdef"", ""last_modified"": ""2023-10-11 09:48:59+00:00"", ""created_at"": ""2023-10-11 08:21:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""tensorboard"", ""safetensors"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""text-to-image"", ""dreambooth"", ""base_model:nitrosocke/Ghibli-Diffusion"", ""base_model:finetune:nitrosocke/Ghibli-Diffusion"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: nitrosocke/Ghibli-Diffusion\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\n- dreambooth\ninstance_prompt: a photo of jksj waist bag\ninference: true"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/optimizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/random_states_0.pkl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/scheduler.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1697015792.502247/events.out.tfevents.1697015792.fd068d4ea563.890.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1697015792.5039868/hparams.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/events.out.tfevents.1697015792.fd068d4ea563.890.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, 
""lastModified"": ""2023-10-11 09:48:59+00:00"", ""cardData"": ""base_model: nitrosocke/Ghibli-Diffusion\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\n- dreambooth\ninstance_prompt: a photo of jksj waist bag\ninference: true"", ""transformersInfo"": null, ""_id"": ""65265b21d5a9686516d90167"", ""modelId"": ""elanoqi/wb-ghibli-800"", ""usedStorage"": 10644418147}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=elanoqi/wb-ghibli-800&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Belanoqi%2Fwb-ghibli-800%5D(%2Felanoqi%2Fwb-ghibli-800)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Guanaco_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
JosephusCheung/Guanaco,"---
|
| 3 |
+
inference: false
|
| 4 |
+
license: gpl-3.0
|
| 5 |
+
datasets:
|
| 6 |
+
- JosephusCheung/GuanacoDataset
|
| 7 |
+
language:
|
| 8 |
+
- en
|
| 9 |
+
- zh
|
| 10 |
+
- ja
|
| 11 |
+
- de
|
| 12 |
+
pipeline_tag: conversational
|
| 13 |
+
tags:
|
| 14 |
+
- llama
|
| 15 |
+
- guannaco
|
| 16 |
+
- alpaca
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
|
| 21 |
+
**You can run on Colab free T4 GPU now**
|
| 22 |
+
|
| 23 |
+
[](https://colab.research.google.com/drive/1ocSmoy3ba1EkYu7JWT1oCw9vz8qC2cMk#scrollTo=zLORi5OcPcIJ)
|
| 24 |
+
|
| 25 |
+
**It is highly recommended to use fp16 inference for this model, as 8-bit precision may significantly affect performance. If you require a more Consumer Hardware friendly version, please use the specialized quantized, only 5+GB V-Ram required** [JosephusCheung/GuanacoOnConsumerHardware](https://huggingface.co/JosephusCheung/GuanacoOnConsumerHardware).
|
| 26 |
+
|
| 27 |
+
**You are encouraged to use the latest version of transformers from GitHub.**
|
| 28 |
+
|
| 29 |
+
Guanaco is an advanced instruction-following language model built on Meta's LLaMA 7B model. Expanding upon the initial 52K dataset from the Alpaca model, an additional 534K+ entries have been incorporated, covering English, Simplified Chinese, Traditional Chinese (Taiwan), Traditional Chinese (Hong Kong), Japanese, Deutsch, and various linguistic and grammatical tasks. This wealth of data enables Guanaco to perform exceptionally well in multilingual environments.
|
| 30 |
+
|
| 31 |
+
In an effort to foster openness and replicability in research, we have made the Guanaco Dataset publicly accessible and we have released the model weights here. By providing these resources, we aim to inspire more researchers to pursue related research and collectively advance the development of instruction-following language models.
|
| 32 |
+
|
| 33 |
+
[KBlueLeaf](https://huggingface.co/KBlueLeaf)’s invaluable contributions to the conceptual validation, [trained model](https://huggingface.co/KBlueLeaf/guanaco-7B-leh) and [inference development](https://github.com/KohakuBlueleaf/guanaco-lora) of the model would be gratefully acknowledged, without whose efforts the project shall never have come to fruition.
|
| 34 |
+
|
| 35 |
+
When utilizing the Guanaco model, please bear in mind the following points:
|
| 36 |
+
|
| 37 |
+
The Guanaco model has not been filtered for harmful, biased, or explicit content. As a result, outputs that do not adhere to ethical norms may be generated during use. Please exercise caution when using the model in research or practical applications.
|
| 38 |
+
1. ### Improved context and prompt role support:
|
| 39 |
+
|
| 40 |
+
The new format is designed to be similar to ChatGPT, allowing for better integration with the Alpaca format and enhancing the overall user experience.
|
| 41 |
+
|
| 42 |
+
Instruction is utilized as a few-shot context to support diverse inputs and responses, making it easier for the model to understand and provide accurate responses to user queries.
|
| 43 |
+
|
| 44 |
+
The format is as follows:
|
| 45 |
+
|
| 46 |
+
```
|
| 47 |
+
### Instruction:
|
| 48 |
+
User: History User Input
|
| 49 |
+
Assistant: History Assistant Answer
|
| 50 |
+
### Input:
|
| 51 |
+
System: Knowledge
|
| 52 |
+
User: New User Input
|
| 53 |
+
### Response:
|
| 54 |
+
New Assistant Answer
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
This structured format allows for easier tracking of the conversation history and maintaining context throughout a multi-turn dialogue.
|
| 58 |
+
|
| 59 |
+
3. ### Role-playing support:
|
| 60 |
+
|
| 61 |
+
Guanaco now offers advanced role-playing support, similar to Character.AI, in English, Simplified Chinese, Traditional Chinese, Japanese, and Deutsch, making it more versatile for users from different linguistic backgrounds.
|
| 62 |
+
|
| 63 |
+
Users can instruct the model to assume specific roles, historical figures, or fictional characters, as well as personalities based on their input. This allows for more engaging and immersive conversations.
|
| 64 |
+
|
| 65 |
+
The model can use various sources of information to provide knowledge and context for the character's background and behavior, such as encyclopedic entries, first-person narrations, or a list of personality traits.
|
| 66 |
+
|
| 67 |
+
The model will consistently output responses in the format ""Character Name: Reply"" to maintain the chosen role throughout the conversation, enhancing the user's experience.
|
| 68 |
+
|
| 69 |
+
4. ### Rejection of answers and avoidance of erroneous responses:
|
| 70 |
+
|
| 71 |
+
The model has been updated to handle situations where it lacks sufficient knowledge or is unable to provide a valid response more effectively.
|
| 72 |
+
|
| 73 |
+
Reserved keywords have been introduced to indicate different scenarios and provide clearer communication with the user, use in System Prompt:
|
| 74 |
+
|
| 75 |
+
NO IDEA: Indicates that the model lacks the necessary knowledge to provide an accurate answer, and will explain this to the user, encouraging them to seek alternative sources.
|
| 76 |
+
|
| 77 |
+
FORBIDDEN: Indicates that the model refuses to answer due to specific reasons (e.g., legal, ethical, or safety concerns), which will be inferred based on the context of the query.
|
| 78 |
+
|
| 79 |
+
SFW: Indicates that the model refuses to answer a question because it has been filtered for NSFW content, ensuring a safer and more appropriate user experience.
|
| 80 |
+
|
| 81 |
+
6. ### Continuation of responses for ongoing topics:
|
| 82 |
+
|
| 83 |
+
The Guanaco model can now continue answering questions or discussing topics upon the user's request, making it more adaptable and better suited for extended conversations.
|
| 84 |
+
|
| 85 |
+
The contextual structure consisting of System, Assistant, and User roles allows the model to engage in multi-turn dialogues, maintain context-aware conversations, and provide more coherent responses.
|
| 86 |
+
|
| 87 |
+
The model can now accommodate role specification and character settings, providing a more immersive and tailored conversational experience based on the user's preferences.
|
| 88 |
+
|
| 89 |
+
It is important to remember that Guanaco is a 7B-parameter model, and **any knowledge-based content should be considered potentially inaccurate**. We strongly recommend **providing verifiable sources in System Prompt, such as Wikipedia, for knowledge-based answers**. In the absence of sources, it is crucial to inform users of this limitation to prevent the dissemination of false information and to maintain transparency.
|
| 90 |
+
|
| 91 |
+
Due to the differences in the format between this project and [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca), please refer to *Guanaco-lora: LoRA for training Multilingual Instruction-following LM based on LLaMA* (https://github.com/KohakuBlueleaf/guanaco-lora) for further training and inference our models.
|
| 92 |
+
|
| 93 |
+
## Recent News
|
| 94 |
+
|
| 95 |
+
We've noticed a recent entrant in the field, the QLoRa method, which we find concerning due to its attempt to piggyback on the reputation of Guanaco. We strongly disapprove of such practices. QLoRa, as far as we can tell, lacks mathematical robustness and its performance significantly trails behind that of GPTQ and advancements such as PEFT fine-tuning, which have been successful in improving upon it.
|
| 96 |
+
|
| 97 |
+
Guanaco has been diligent, consistently releasing multilingual datasets since March 2023, along with publishing weights that are not only an enhanced version of GPTQ but also support multimodal VQA and have been optimized for 4-bit. Despite the substantial financial investment of tens of thousands of dollars in distilling data from OpenAI's GPT models, we still consider these efforts to be incremental.
|
| 98 |
+
|
| 99 |
+
We, however, aim to move beyond the incremental:
|
| 100 |
+
|
| 101 |
+
1. We strive to no longer rely on distillation data from OpenAI: We've found that relying on GPT-generated data impedes significant breakthroughs. Furthermore, this approach has proven to be disastrous when dealing with the imbalances in multilingual tasks.
|
| 102 |
+
|
| 103 |
+
2. We're focusing on the enhancement of quantization structure and partial native 4-bit fine-tuning: We are deeply appreciative of the GPTQ-Llama project for paving the way in state-of-the-art LLM quantization. Its unique qualities, especially at the 7B size, are facilitating significant progress in multilingual and multimodal tasks.
|
| 104 |
+
|
| 105 |
+
3. We plan to utilize visual data to adjust our language models: We believe this will fundamentally address the issues of language imbalance, translation inaccuracies, and the lack of graphical logic in LLM.
|
| 106 |
+
|
| 107 |
+
While our work is still in the early stages, we're determined to break new ground in these areas. Our critique of QLoRa's practices does not stem from animosity but rather from the fundamental belief that innovation should be rooted in originality, integrity, and substantial progress.
|
| 108 |
+
","{""id"": ""JosephusCheung/Guanaco"", ""author"": ""JosephusCheung"", ""sha"": ""bed6f3bd18f07a4a379525645cbd86d622b12836"", ""last_modified"": ""2023-05-29 12:48:21+00:00"", ""created_at"": ""2023-04-08 03:03:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 273, ""downloads_all_time"": null, ""likes"": 230, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""guannaco"", ""alpaca"", ""conversational"", ""en"", ""zh"", ""ja"", ""de"", ""dataset:JosephusCheung/GuanacoDataset"", ""doi:10.57967/hf/0607"", ""license:gpl-3.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- JosephusCheung/GuanacoDataset\nlanguage:\n- en\n- zh\n- ja\n- de\nlicense: gpl-3.0\npipeline_tag: conversational\ntags:\n- llama\n- guannaco\n- alpaca\ninference: false"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, 
""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='StupidBanner.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""h2oai/h2ogpt-chatbot"", ""h2oai/h2ogpt-chatbot2"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", 
""neubla/neubla-llm-evaluation-board"", ""lapsapking/h2ogpt-chatbot"", ""sddwt/guanaco"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""his0/h2ogpt-chatbot"", ""atimughal662/InfoFusion"", ""akashkj/H2OGPT"", ""ariel0330/h2osiri"", ""elitecode/h2ogpt-chatbot2"", ""ccoreilly/aigua-xat"", ""Sambhavnoobcoder/h2ogpt-chatbot"", ""smothiki/open_llm_leaderboard"", ""iblfe/test"", ""AnonymousSub/Ayurveda_Chatbot"", ""K00B404/Research-chatbot"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kelvin-t-lu/chatbot"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""cw332/h2ogpt-chatbot"", ""mjalg/IFEvalTR"", ""abugaber/test""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-29 12:48:21+00:00"", ""cardData"": ""datasets:\n- JosephusCheung/GuanacoDataset\nlanguage:\n- en\n- zh\n- ja\n- de\nlicense: gpl-3.0\npipeline_tag: conversational\ntags:\n- llama\n- guannaco\n- alpaca\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6430d972217720a4f60879e8"", ""modelId"": ""JosephusCheung/Guanaco"", ""usedStorage"": 80337664129}",0,,0,,0,,0,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, h2oai/h2ogpt-chatbot, h2oai/h2ogpt-chatbot2, 
huggingface/InferenceSupport/discussions/new?title=JosephusCheung/Guanaco&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJosephusCheung%2FGuanaco%5D(%2FJosephusCheung%2FGuanaco)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, lapsapking/h2ogpt-chatbot",13
|
HunyuanVideo-gguf_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
city96/HunyuanVideo-gguf,"---
|
| 3 |
+
base_model: tencent/HunyuanVideo
|
| 4 |
+
library_name: gguf
|
| 5 |
+
quantized_by: city96
|
| 6 |
+
tags:
|
| 7 |
+
- text-to-video
|
| 8 |
+
license: other
|
| 9 |
+
license_name: tencent-hunyuan-community
|
| 10 |
+
license_link: LICENSE.md
|
| 11 |
+
---
|
| 12 |
+
This is a direct GGUF conversion of [tencent/HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo)
|
| 13 |
+
|
| 14 |
+
**It is intended to be used with the native, built-in ComfyUI HunyuanVideo nodes**
|
| 15 |
+
|
| 16 |
+
As this is a quantized model not a finetune, all the same restrictions/original license terms still apply.
|
| 17 |
+
|
| 18 |
+
The model files can be used with the [ComfyUI-GGUF](https://github.com/city96/ComfyUI-GGUF) custom node.
|
| 19 |
+
|
| 20 |
+
Place model files in `ComfyUI/models/unet` - see the GitHub readme for further install instructions.
|
| 21 |
+
|
| 22 |
+
The VAE can be downloaded from [this repository by Kijai](https://huggingface.co/Kijai/HunyuanVideo_comfy/blob/main/hunyuan_video_vae_bf16.safetensors)
|
| 23 |
+
|
| 24 |
+
Please refer to [this chart](https://github.com/ggerganov/llama.cpp/blob/master/examples/perplexity/README.md#llama-3-8b-scoreboard) for a basic overview of quantization types.
|
| 25 |
+
","{""id"": ""city96/HunyuanVideo-gguf"", ""author"": ""city96"", ""sha"": ""6869e07d25cffdff09f7c10a79887ac24217b67b"", ""last_modified"": ""2024-12-17 06:33:49+00:00"", ""created_at"": ""2024-12-17 06:03:06+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6400, ""downloads_all_time"": null, ""likes"": 162, ""library_name"": ""gguf"", ""gguf"": {""total"": 12821012544, ""architecture"": ""hyvid""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""text-to-video"", ""base_model:tencent/HunyuanVideo"", ""base_model:quantized:tencent/HunyuanVideo"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: tencent/HunyuanVideo\nlibrary_name: gguf\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE.md\ntags:\n- text-to-video\nquantized_by: city96"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-BF16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q3_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q4_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q4_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q4_K_S.gguf', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q5_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q5_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q5_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q5_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p-Q8_0.gguf', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Sergidev/Illustration-Text-To-Video"", ""Sergidev/Huanyan-Studio""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-17 06:33:49+00:00"", ""cardData"": ""base_model: tencent/HunyuanVideo\nlibrary_name: gguf\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE.md\ntags:\n- text-to-video\nquantized_by: city96"", ""transformersInfo"": null, ""_id"": ""6761141ab8b1b60a0e57a3e0"", ""modelId"": ""city96/HunyuanVideo-gguf"", ""usedStorage"": 132913917696}",0,,0,,0,,0,,0,"Sergidev/Huanyan-Studio, Sergidev/Illustration-Text-To-Video, huggingface/InferenceSupport/discussions/new?title=city96/HunyuanVideo-gguf&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcity96%2FHunyuanVideo-gguf%5D(%2Fcity96%2FHunyuanVideo-gguf)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",3
|
LaBSE_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Llama-2-7B-Chat-GGML_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TheBloke/Llama-2-7B-Chat-GGML,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: other
|
| 6 |
+
tags:
|
| 7 |
+
- facebook
|
| 8 |
+
- meta
|
| 9 |
+
- pytorch
|
| 10 |
+
- llama
|
| 11 |
+
- llama-2
|
| 12 |
+
model_name: Llama 2 7B Chat
|
| 13 |
+
arxiv: 2307.09288
|
| 14 |
+
inference: false
|
| 15 |
+
model_creator: Meta Llama 2
|
| 16 |
+
model_link: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
|
| 17 |
+
model_type: llama
|
| 18 |
+
pipeline_tag: text-generation
|
| 19 |
+
quantized_by: TheBloke
|
| 20 |
+
base_model: meta-llama/Llama-2-7b-chat-hf
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
<!-- header start -->
|
| 24 |
+
<!-- 200823 -->
|
| 25 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 26 |
+
<img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 27 |
+
</div>
|
| 28 |
+
<div style=""display: flex; justify-content: space-between; width: 100%;"">
|
| 29 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-start;"">
|
| 30 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
|
| 31 |
+
</div>
|
| 32 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-end;"">
|
| 33 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
|
| 34 |
+
</div>
|
| 35 |
+
</div>
|
| 36 |
+
<div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
|
| 37 |
+
<hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
|
| 38 |
+
<!-- header end -->
|
| 39 |
+
|
| 40 |
+
# Llama 2 7B Chat - GGML
|
| 41 |
+
- Model creator: [Meta Llama 2](https://huggingface.co/meta-llama)
|
| 42 |
+
- Original model: [Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
|
| 43 |
+
|
| 44 |
+
## Description
|
| 45 |
+
|
| 46 |
+
This repo contains GGML format model files for [Meta Llama 2's Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf).
|
| 47 |
+
|
| 48 |
+
### Important note regarding GGML files.
|
| 49 |
+
|
| 50 |
+
The GGML format has now been superseded by GGUF. As of August 21st 2023, [llama.cpp](https://github.com/ggerganov/llama.cpp) no longer supports GGML models. Third party clients and libraries are expected to still support it for a time, but many may also drop support.
|
| 51 |
+
|
| 52 |
+
Please use the GGUF models instead.
|
| 53 |
+
### About GGML
|
| 54 |
+
|
| 55 |
+
GGML files are for CPU + GPU inference using [llama.cpp](https://github.com/ggerganov/llama.cpp) and libraries and UIs which support this format, such as:
|
| 56 |
+
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most popular web UI. Supports NVidia CUDA GPU acceleration.
|
| 57 |
+
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a powerful GGML web UI with GPU acceleration on all platforms (CUDA and OpenCL). Especially good for story telling.
|
| 58 |
+
* [LM Studio](https://lmstudio.ai/), a fully featured local GUI with GPU acceleration on both Windows (NVidia and AMD), and macOS.
|
| 59 |
+
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with CUDA GPU acceleration via the c_transformers backend.
|
| 60 |
+
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.
|
| 61 |
+
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 62 |
+
|
| 63 |
+
## Repositories available
|
| 64 |
+
|
| 65 |
+
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ)
|
| 66 |
+
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF)
|
| 67 |
+
* [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference (deprecated)](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGML)
|
| 68 |
+
* [Meta Llama 2's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
|
| 69 |
+
|
| 70 |
+
## Prompt template: Llama-2-Chat
|
| 71 |
+
|
| 72 |
+
```
|
| 73 |
+
[INST] <<SYS>>
|
| 74 |
+
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
|
| 75 |
+
<</SYS>>
|
| 76 |
+
{prompt}[/INST]
|
| 77 |
+
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
<!-- compatibility_ggml start -->
|
| 81 |
+
## Compatibility
|
| 82 |
+
|
| 83 |
+
These quantised GGML files are compatible with llama.cpp between June 6th (commit `2d43387`) and August 21st 2023.
|
| 84 |
+
|
| 85 |
+
For support with latest llama.cpp, please use GGUF files instead.
|
| 86 |
+
|
| 87 |
+
The final llama.cpp commit with support for GGML was: [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
|
| 88 |
+
|
| 89 |
+
As of August 23rd 2023 they are still compatible with all UIs, libraries and utilities which use GGML. This may change in the future.
|
| 90 |
+
|
| 91 |
+
## Explanation of the new k-quant methods
|
| 92 |
+
<details>
|
| 93 |
+
<summary>Click to see details</summary>
|
| 94 |
+
|
| 95 |
+
The new methods available are:
|
| 96 |
+
* GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
|
| 97 |
+
* GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw.
|
| 98 |
+
* GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
|
| 99 |
+
* GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
|
| 100 |
+
* GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw
|
| 101 |
+
* GGML_TYPE_Q8_K - ""type-0"" 8-bit quantization. Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type.
|
| 102 |
+
|
| 103 |
+
Refer to the Provided Files table below to see what files use which methods, and how.
|
| 104 |
+
</details>
|
| 105 |
+
<!-- compatibility_ggml end -->
|
| 106 |
+
|
| 107 |
+
## Provided files
|
| 108 |
+
|
| 109 |
+
| Name | Quant method | Bits | Size | Max RAM required | Use case |
|
| 110 |
+
| ---- | ---- | ---- | ---- | ---- | ----- |
|
| 111 |
+
| llama-2-7b-chat.ggmlv3.q2_K.bin | q2_K | 2 | 2.87 GB| 5.37 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.vw and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors. |
|
| 112 |
+
| llama-2-7b-chat.ggmlv3.q3_K_S.bin | q3_K_S | 3 | 2.95 GB| 5.45 GB | New k-quant method. Uses GGML_TYPE_Q3_K for all tensors |
|
| 113 |
+
| llama-2-7b-chat.ggmlv3.q3_K_M.bin | q3_K_M | 3 | 3.28 GB| 5.78 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 114 |
+
| llama-2-7b-chat.ggmlv3.q3_K_L.bin | q3_K_L | 3 | 3.60 GB| 6.10 GB | New k-quant method. Uses GGML_TYPE_Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 115 |
+
| llama-2-7b-chat.ggmlv3.q4_0.bin | q4_0 | 4 | 3.79 GB| 6.29 GB | Original quant method, 4-bit. |
|
| 116 |
+
| llama-2-7b-chat.ggmlv3.q4_K_S.bin | q4_K_S | 4 | 3.83 GB| 6.33 GB | New k-quant method. Uses GGML_TYPE_Q4_K for all tensors |
|
| 117 |
+
| llama-2-7b-chat.ggmlv3.q4_K_M.bin | q4_K_M | 4 | 4.08 GB| 6.58 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q4_K |
|
| 118 |
+
| llama-2-7b-chat.ggmlv3.q4_1.bin | q4_1 | 4 | 4.21 GB| 6.71 GB | Original quant method, 4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models. |
|
| 119 |
+
| llama-2-7b-chat.ggmlv3.q5_0.bin | q5_0 | 5 | 4.63 GB| 7.13 GB | Original quant method, 5-bit. Higher accuracy, higher resource usage and slower inference. |
|
| 120 |
+
| llama-2-7b-chat.ggmlv3.q5_K_S.bin | q5_K_S | 5 | 4.65 GB| 7.15 GB | New k-quant method. Uses GGML_TYPE_Q5_K for all tensors |
|
| 121 |
+
| llama-2-7b-chat.ggmlv3.q5_K_M.bin | q5_K_M | 5 | 4.78 GB| 7.28 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q5_K |
|
| 122 |
+
| llama-2-7b-chat.ggmlv3.q5_1.bin | q5_1 | 5 | 5.06 GB| 7.56 GB | Original quant method, 5-bit. Even higher accuracy, resource usage and slower inference. |
|
| 123 |
+
| llama-2-7b-chat.ggmlv3.q6_K.bin | q6_K | 6 | 5.53 GB| 8.03 GB | New k-quant method. Uses GGML_TYPE_Q8_K for all tensors - 6-bit quantization |
|
| 124 |
+
| llama-2-7b-chat.ggmlv3.q8_0.bin | q8_0 | 8 | 7.16 GB| 9.66 GB | Original quant method, 8-bit. Almost indistinguishable from float16. High resource use and slow. Not recommended for most users. |
|
| 125 |
+
|
| 126 |
+
**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
|
| 127 |
+
|
| 128 |
+
## How to run in `llama.cpp`
|
| 129 |
+
|
| 130 |
+
Make sure you are using `llama.cpp` from commit [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa) or earlier.
|
| 131 |
+
|
| 132 |
+
For compatibility with latest llama.cpp, please use GGUF files instead.
|
| 133 |
+
|
| 134 |
+
```
|
| 135 |
+
./main -t 10 -ngl 32 -m llama-2-7b-chat.ggmlv3.q4_K_M.bin --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\nWrite a story about llamas[/INST]""
|
| 136 |
+
```
|
| 137 |
+
Change `-t 10` to the number of physical CPU cores you have. For example if your system has 8 cores/16 threads, use `-t 8`.
|
| 138 |
+
|
| 139 |
+
Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
|
| 140 |
+
|
| 141 |
+
Change `-c 2048` to the desired sequence length for this model. For example, `-c 4096` for a Llama 2 model. For models that use RoPE, add `--rope-freq-base 10000 --rope-freq-scale 0.5` for doubled context, or `--rope-freq-base 10000 --rope-freq-scale 0.25` for 4x context.
|
| 142 |
+
|
| 143 |
+
If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
|
| 144 |
+
|
| 145 |
+
For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
|
| 146 |
+
|
| 147 |
+
## How to run in `text-generation-webui`
|
| 148 |
+
|
| 149 |
+
Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).
|
| 150 |
+
|
| 151 |
+
<!-- footer start -->
|
| 152 |
+
<!-- 200823 -->
|
| 153 |
+
## Discord
|
| 154 |
+
|
| 155 |
+
For further support, and discussions on these models and AI in general, join us at:
|
| 156 |
+
|
| 157 |
+
[TheBloke AI's Discord server](https://discord.gg/theblokeai)
|
| 158 |
+
|
| 159 |
+
## Thanks, and how to contribute.
|
| 160 |
+
|
| 161 |
+
Thanks to the [chirper.ai](https://chirper.ai) team!
|
| 162 |
+
|
| 163 |
+
I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
|
| 164 |
+
|
| 165 |
+
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
|
| 166 |
+
|
| 167 |
+
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
|
| 168 |
+
|
| 169 |
+
* Patreon: https://patreon.com/TheBlokeAI
|
| 170 |
+
* Ko-Fi: https://ko-fi.com/TheBlokeAI
|
| 171 |
+
|
| 172 |
+
**Special thanks to**: Aemon Algiz.
|
| 173 |
+
|
| 174 |
+
**Patreon special mentions**: Russ Johnson, J, alfie_i, Alex, NimbleBox.ai, Chadd, Mandus, Nikolai Manek, Ken Nordquist, ya boyyy, Illia Dulskyi, Viktor Bowallius, vamX, Iucharbius, zynix, Magnesian, Clay Pascal, Pierre Kircher, Enrico Ros, Tony Hughes, Elle, Andrey, knownsqashed, Deep Realms, Jerry Meng, Lone Striker, Derek Yates, Pyrater, Mesiah Bishop, James Bentley, Femi Adebogun, Brandon Frisco, SuperWojo, Alps Aficionado, Michael Dempsey, Vitor Caleffi, Will Dee, Edmond Seymore, usrbinkat, LangChain4j, Kacper Wikieł, Luke Pendergrass, John Detwiler, theTransient, Nathan LeClaire, Tiffany J. Kim, biorpg, Eugene Pentland, Stanislav Ovsiannikov, Fred von Graf, terasurfer, Kalila, Dan Guido, Nitin Borwankar, 阿明, Ai Maven, John Villwock, Gabriel Puliatti, Stephen Murray, Asp the Wyvern, danny, Chris Smitley, ReadyPlayerEmma, S_X, Daniel P. Andersen, Olakabola, Jeffrey Morgan, Imad Khwaja, Caitlyn Gatomon, webtim, Alicia Loh, Trenton Dambrowitz, Swaroop Kallakuri, Erik Bjäreholt, Leonard Tan, Spiking Neurons AB, Luke @flexchar, Ajan Kanaga, Thomas Belote, Deo Leter, RoA, Willem Michiel, transmissions 11, subjectnull, Matthew Berman, Joseph William Delisle, David Ziegler, Michael Davis, Johann-Peter Hartmann, Talal Aujan, senxiiz, Artur Olbinski, Rainer Wilmers, Spencer Kim, Fen Risland, Cap'n Zoog, Rishabh Srivastava, Michael Levine, Geoffrey Montalvo, Sean Connelly, Alexandros Triantafyllidis, Pieter, Gabriel Tamborski, Sam, Subspace Studios, Junyu Yang, Pedro Madruga, Vadim, Cory Kujawski, K, Raven Klaugh, Randy H, Mano Prime, Sebastain Graf, Space Cruiser
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
Thank you to all my generous patrons and donaters!
|
| 178 |
+
|
| 179 |
+
And thank you again to a16z for their generous grant.
|
| 180 |
+
|
| 181 |
+
<!-- footer end -->
|
| 182 |
+
|
| 183 |
+
# Original model card: Meta Llama 2's Llama 2 7B Chat
|
| 184 |
+
|
| 185 |
+
# **Llama 2**
|
| 186 |
+
Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
|
| 187 |
+
|
| 188 |
+
## Model Details
|
| 189 |
+
*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
|
| 190 |
+
|
| 191 |
+
Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.
|
| 192 |
+
|
| 193 |
+
**Model Developers** Meta
|
| 194 |
+
|
| 195 |
+
**Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.
|
| 196 |
+
|
| 197 |
+
**Input** Models input text only.
|
| 198 |
+
|
| 199 |
+
**Output** Models generate text only.
|
| 200 |
+
|
| 201 |
+
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
||Training Data|Params|Content Length|GQA|Tokens|LR|
|
| 205 |
+
|---|---|---|---|---|---|---|
|
| 206 |
+
|Llama 2|*A new mix of publicly available online data*|7B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 207 |
+
|Llama 2|*A new mix of publicly available online data*|13B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 208 |
+
|Llama 2|*A new mix of publicly available online data*|70B|4k|✔|2.0T|1.5 x 10<sup>-4</sup>|
|
| 209 |
+
|
| 210 |
+
*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.
|
| 211 |
+
|
| 212 |
+
**Model Dates** Llama 2 was trained between January 2023 and July 2023.
|
| 213 |
+
|
| 214 |
+
**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
|
| 215 |
+
|
| 216 |
+
**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
|
| 217 |
+
|
| 218 |
+
**Research Paper** [""Llama-2: Open Foundation and Fine-tuned Chat Models""](arxiv.org/abs/2307.09288)
|
| 219 |
+
|
| 220 |
+
## Intended Use
|
| 221 |
+
**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
|
| 222 |
+
|
| 223 |
+
To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).
|
| 224 |
+
|
| 225 |
+
**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.
|
| 226 |
+
|
| 227 |
+
## Hardware and Software
|
| 228 |
+
**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
|
| 229 |
+
|
| 230 |
+
**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.
|
| 231 |
+
|
| 232 |
+
||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)|
|
| 233 |
+
|---|---|---|---|
|
| 234 |
+
|Llama 2 7B|184320|400|31.22|
|
| 235 |
+
|Llama 2 13B|368640|400|62.44|
|
| 236 |
+
|Llama 2 70B|1720320|400|291.42|
|
| 237 |
+
|Total|3311616||539.00|
|
| 238 |
+
|
| 239 |
+
**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
|
| 240 |
+
|
| 241 |
+
## Training Data
|
| 242 |
+
**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
|
| 243 |
+
|
| 244 |
+
**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.
|
| 245 |
+
|
| 246 |
+
## Evaluation Results
|
| 247 |
+
|
| 248 |
+
In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.
|
| 249 |
+
|
| 250 |
+
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|
| 251 |
+
|---|---|---|---|---|---|---|---|---|---|
|
| 252 |
+
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|
| 253 |
+
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|
| 254 |
+
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|
| 255 |
+
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|
| 256 |
+
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|
| 257 |
+
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|
| 258 |
+
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
|
| 259 |
+
|
| 260 |
+
**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
|
| 261 |
+
|
| 262 |
+
|||TruthfulQA|Toxigen|
|
| 263 |
+
|---|---|---|---|
|
| 264 |
+
|Llama 1|7B|27.42|23.00|
|
| 265 |
+
|Llama 1|13B|41.74|23.08|
|
| 266 |
+
|Llama 1|33B|44.19|22.57|
|
| 267 |
+
|Llama 1|65B|48.71|21.77|
|
| 268 |
+
|Llama 2|7B|33.29|**21.25**|
|
| 269 |
+
|Llama 2|13B|41.86|26.10|
|
| 270 |
+
|Llama 2|70B|**50.18**|24.60|
|
| 271 |
+
|
| 272 |
+
**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
|||TruthfulQA|Toxigen|
|
| 276 |
+
|---|---|---|---|
|
| 277 |
+
|Llama-2-Chat|7B|57.04|**0.00**|
|
| 278 |
+
|Llama-2-Chat|13B|62.18|**0.00**|
|
| 279 |
+
|Llama-2-Chat|70B|**64.14**|0.01|
|
| 280 |
+
|
| 281 |
+
**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.
|
| 282 |
+
|
| 283 |
+
## Ethical Considerations and Limitations
|
| 284 |
+
Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
|
| 285 |
+
|
| 286 |
+
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide)
|
| 287 |
+
|
| 288 |
+
## Reporting Issues
|
| 289 |
+
Please report any software “bug,” or other problems with the models through one of the following means:
|
| 290 |
+
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
|
| 291 |
+
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
|
| 292 |
+
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
|
| 293 |
+
|
| 294 |
+
## Llama Model Index
|
| 295 |
+
|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|
| 296 |
+
|---|---|---|---|---|
|
| 297 |
+
|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
|
| 298 |
+
|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
|
| 299 |
+
|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
|
| 300 |
+
","{""id"": ""TheBloke/Llama-2-7B-Chat-GGML"", ""author"": ""TheBloke"", ""sha"": ""76cd63c351ae389e1d4b91cab2cf470aab11864b"", ""last_modified"": ""2023-09-27 13:00:17+00:00"", ""created_at"": ""2023-07-18 17:38:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1926, ""downloads_all_time"": null, ""likes"": 871, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""facebook"", ""meta"", ""pytorch"", ""llama-2"", ""text-generation"", ""en"", ""arxiv:2307.09288"", ""base_model:meta-llama/Llama-2-7b-chat-hf"", ""base_model:finetune:meta-llama/Llama-2-7b-chat-hf"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: meta-llama/Llama-2-7b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 7B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\narxiv: 2307.09288\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q2_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q3_K_L.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q3_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q3_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q4_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q4_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q4_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q4_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q5_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q5_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q5_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q5_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q6_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b-chat.ggmlv3.q8_0.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mikeee/llama2-7b-chat-uncensored-ggml"", ""memef4rmer/llama2-7b-chat-uncensored-ggml"", ""harsh-manvar/llama-2-7b-chat-test"", ""Nymbo/llama2-7b-chat-uncensored-ggml"", ""r3gm/ConversaDocs"", ""YaTharThShaRma999/WizardLM7b"", ""mikeee/nousresearch-nous-hermes-llama2-13b-ggml"", ""mikeee/llama2-7b-chat-ggml"", ""ThisIs-Developer/Llama-2-GGML-Medical-Chatbot"", ""PSMdata/langchain-llama2-7b-chat"", ""ankanpy/LlamaGPT"", ""izammohammed/legal-advisor"", ""DHEIVER/VestibulaIA"", 
""mikeee/langchain-llama2-7b-chat-uncensored-ggml"", ""haywired/medibot-llama2"", ""srikanth-nm/ai_seeker"", ""ThisIs-Developer/Llama-2-GGML-CSV-Chatbot"", ""K00B404/langchain-llama2-7b-chat-uncensored-ggml"", ""atharvapawar/Email-Generator-App-Langchain-LLAMA2-LLM"", ""V15h/learnai2.0"", ""saitejad/llama-2-gen-with-speech"", ""datastx/EmailGenerator"", ""mrm8488/llama-2-7b-chat-cpp"", ""captain-awesome/docuverse"", ""jergra43/llama2-7b-ggml-chat-app"", ""adityaagrawal/rag-assignment"", ""GoodML/MediBotAI"", ""4darsh-Dev/medicure"", ""4darsh-Dev/orchard_eyes-chatbot"", ""DhruvSarin/BlogGenerator"", ""BojanSimoski/SocialMovezVeggieAssistant"", ""umamicode/llama2-test"", ""maknee/kani-llama-v2-ggml"", ""gary109/llama2-webui"", ""zilongpa/llama2-webui"", ""lavanjv/vec-digichat"", ""TogetherAI/llahrou"", ""DripBeanBag/llama2_chatbot"", ""ndn1954/pdfchatbot"", ""AinzOoalGowns/llama2-7b-chat-uncensored-test"", ""Jafta/llama2-7b-chat-ggml"", ""LuckRafly/LLM-Generate-Math_Quiz"", ""Bankrid/huggingface-app"", ""yangzzay/HydroxApp_t2t"", ""myy97/llama2-webui"", ""Amirizaniani/Auditing_LLM"", ""ndn1954/chatwithpdf"", ""thivav/llama2-blogger"", ""Awe03/ai"", ""jingwora/llama2-7b-chat-ggml"", ""xsa-dev/llama2-7b-llama_cpp-ggmlv3-q4_1"", ""xsa-dev/llama-2-7b-chat-ggmlv3-q6_K"", ""yuping322/LLaMA-2-CHAT"", ""Jayavathsan/Email_Generator"", ""sofarikasid/LLM_Search_Engine"", ""Sakil/CSVQConnect"", ""adas100/blogs"", ""adas100/blog"", ""manjunathkukanur/mypdfchatbot"", ""ndn1954/llmdocumentchatbot"", ""dnzengou/llama-gpt-chatbot"", ""amol-rainfall/amol-rainfallStratosphere"", ""Pyasma/Querybot"", ""amol-rainfall/Stratosphere"", ""1littlecoder/llama-cpp-python-cuda-gradio"", ""goavinash5/Gradio_LLAMA_Testing"", ""lyimo/llama_multimodel_model"", ""ToonTownTommy/Tommylaw"", ""quangtn266/EmailGeneratorUsingLLAMA2"", ""huy302/SPGCI_Learnathon"", ""JohnTan38/llama-2-7b-chat-1"", ""sheetalbborkar/ArticleGenerator"", ""rajeshasb/llmsasb"", ""Dalleon/llama2-7b-chat-uncensored-ggml"", 
""samim2024/EMAIL-Generator-META-AI"", ""brunodoti/turing-20.0"", ""Nikhil0987/med_bot"", ""adityakumar/nhpc-chatbot"", ""csalabs/AI-EMBD"", ""DeyPoulomi/HR_resume_screening"", ""Jacksonnavigator7/Llamacpp"", ""harichselvamc/Miskaacomics"", ""aiscientist/llamachat"", ""maheshwarligade/email_generator_llama2"", ""csalabs/Replicate-7b-chat-Llama-streamlit"", ""Preet2002/blog-generation"", ""uyen13/chatbot"", ""yashas-vi/JobCV_Writer"", ""md-vasim/llama-2-hf"", ""hellojj7/email_app"", ""robertquest/llama-2-7b-chat-test"", ""shubhamtw/qaBot"", ""kartikeyarana/ESCO"", ""nikesh66/mediweb1.0"", ""uyen13/chatgirl"", ""sanket09/llama-2-7b-chat"", ""Antonio49/llama-2-7b-chat"", ""rahul-bhoyar-1995/Email-Generator-using-LLM"", ""Amirizaniani/AuditLLM"", ""uyen13/chatgirl2""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 13:00:17+00:00"", ""cardData"": ""base_model: meta-llama/Llama-2-7b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 7B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\narxiv: 2307.09288\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""64b6ce072a8e3cd59df98e98"", ""modelId"": ""TheBloke/Llama-2-7B-Chat-GGML"", ""usedStorage"": 60421177985}",0,"https://huggingface.co/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot, https://huggingface.co/nik-55/youtube-question-answer",2,,0,,0,,0,"DhruvSarin/BlogGenerator, GoodML/MediBotAI, Nymbo/llama2-7b-chat-uncensored-ggml, PSMdata/langchain-llama2-7b-chat, ThisIs-Developer/Llama-2-GGML-Medical-Chatbot, YaTharThShaRma999/WizardLM7b, harsh-manvar/llama-2-7b-chat-test, 
huggingface/InferenceSupport/discussions/new?title=TheBloke/Llama-2-7B-Chat-GGML&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FLlama-2-7B-Chat-GGML%5D(%2FTheBloke%2FLlama-2-7B-Chat-GGML)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, memef4rmer/llama2-7b-chat-uncensored-ggml, mikeee/llama2-7b-chat-ggml, mikeee/llama2-7b-chat-uncensored-ggml, mikeee/nousresearch-nous-hermes-llama2-13b-ggml, r3gm/ConversaDocs",13
|
| 301 |
+
ThisIs-Developer/Llama-2-GGML-Medical-Chatbot,"---
|
| 302 |
+
license: mit
|
| 303 |
+
language:
|
| 304 |
+
- en
|
| 305 |
+
base_model: TheBloke/Llama-2-7B-Chat-GGML
|
| 306 |
+
pipeline_tag: question-answering
|
| 307 |
+
library_name: transformers
|
| 308 |
+
tags:
|
| 309 |
+
- medical
|
| 310 |
+
- conversational
|
| 311 |
+
- text-generation
|
| 312 |
+
---
|
| 313 |
+
# 🐍 Llama-2-GGML-Medical-Chatbot 🤖
|
| 314 |
+
The **Llama-2-7B-Chat-GGML-Medical-Chatbot** is a repository for a medical chatbot that uses the _Llama-2-7B-Chat-GGML_ model and the pdf _The Gale Encyclopedia of Medicine_. The chatbot is still under development, but it has the potential to be a valuable tool for patients, healthcare professionals, and researchers. The chatbot can be used to answer questions about medical topics, provide summaries of medical articles, and generate medical text. However, it is important to note that the chatbot is not a substitute for medical advice from a qualified healthcare professional.
|
| 315 |
+

|
| 316 |
+
|
| 317 |
+
## 📚 Here are some of the features of the Llama-2-7B-Chat-GGML-Medical-Chatbot:
|
| 318 |
+
|
| 319 |
+
- It uses the _Llama-2-7B-Chat-GGML_ model, which is a **large language model (LLM)** that has been fine-tuned.
|
| 320 |
+
* Name - **llama-2-7b-chat.ggmlv3.q2_K.bin**
|
| 321 |
+
* Quant method - q2_K
|
| 322 |
+
* Bits - 2
|
| 323 |
+
* Size - **2.87 GB**
|
| 324 |
+
* Max RAM required - 5.37 GB
|
| 325 |
+
* Use case - New k-quant method. Uses GGML_TYPE_Q4_K for the attention.vw and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors.
|
| 326 |
+
* **Model:** Know more about model **[Llama-2-7B-Chat-GGML](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML)**
|
| 327 |
+
- It is trained on the pdf **[The Gale Encyclopedia of Medicine, Volume 1, 2nd Edition, 637-page PDF](https://github.com/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot/blob/main/data/71763-gale-encyclopedia-of-medicine.-vol.-1.-2nd-ed.pdf)**, which is a comprehensive medical reference that provides information on a wide range of medical topics. This means that the chatbot is able to answer questions about a variety of medical topics.
|
| 328 |
+
- This is a sophisticated medical chatbot, developed using Llama-2 7B and Sentence Transformers. Powered by **[Langchain](https://python.langchain.com/docs/get_started/introduction)** and **[Chainlit](https://docs.chainlit.io/overview)**, This bot operates on a powerful CPU computer that boasts a minimum of
|
| 329 |
+
* Operating system: Linux, macOS, or Windows
|
| 330 |
+
* CPU: Intel® Core™ i3
|
| 331 |
+
* RAM: **8 GB**
|
| 332 |
+
* Disk space: 7 GB
|
| 333 |
+
* GPU: None **(CPU only)**
|
| 334 |
+
- It is still under development, but it has the potential to be a valuable tool for patients, healthcare professionals, and researchers.
|
| 335 |
+
|
| 336 |
+
## 🚀 Quickstart
|
| 337 |
+
1. Open Git Bash.
|
| 338 |
+
2. Change the current working directory to the location where you want the cloned directory.
|
| 339 |
+
3. Type `git clone`, and then paste the URL you copied earlier.
|
| 340 |
+
```bash
|
| 341 |
+
git clone https://github.com/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot.git
|
| 342 |
+
```
|
| 343 |
+
Press Enter to create your local clone.
|
| 344 |
+
4. Install the pip packages in requirements.txt
|
| 345 |
+
```bash
|
| 346 |
+
pip install -r requirements.txt
|
| 347 |
+
```
|
| 348 |
+
5. Now run it!
|
| 349 |
+
```ternimal
|
| 350 |
+
chainlit run model.py -w
|
| 351 |
+
```
|
| 352 |
+
## 📖 ChatBot Conversession
|
| 353 |
+
### ⛓️Chainlit ver. on [#v1.0.1.dev20230913](https://github.com/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot/releases/tag/v1.0.1.dev20230913)
|
| 354 |
+

|
| 355 |
+
|
| 356 |
+
### ⚡Streamlit ver. on [#v2.0.1.dev20231230](https://github.com/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot/releases/tag/v2.0.1.dev20231230)
|
| 357 |
+

|
| 358 |
+
|
| 359 |
+
### DEMO: 📽️Conversession.vid.mp4->https://cdn-uploads.huggingface.co/production/uploads/64d8c442a4839890b2490db9/iI4t0lhjkCw3dDSvWQ4Jk.mp4
|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
|
| 363 |
+
","{""id"": ""ThisIs-Developer/Llama-2-GGML-Medical-Chatbot"", ""author"": ""ThisIs-Developer"", ""sha"": ""f0bbd3d9b14dda9526c4368fb1489d7a4c2ec760"", ""last_modified"": ""2024-09-05 15:35:10+00:00"", ""created_at"": ""2023-12-19 14:51:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 74, ""downloads_all_time"": null, ""likes"": 36, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""medical"", ""conversational"", ""text-generation"", ""question-answering"", ""en"", ""base_model:TheBloke/Llama-2-7B-Chat-GGML"", ""base_model:finetune:TheBloke/Llama-2-7B-Chat-GGML"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: TheBloke/Llama-2-7B-Chat-GGML\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: question-answering\ntags:\n- medical\n- conversational\n- text-generation"", ""widget_data"": [{""text"": ""Where do I live?"", ""context"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""Where do I live?"", ""context"": ""My name is Sarah and I live in London""}, {""text"": ""What's my name?"", ""context"": ""My name is Clara and I live in Berkeley.""}, {""text"": ""Which name is also used to describe the Amazon rainforest in English?"", ""context"": ""The Amazon rainforest (Portuguese: Floresta Amaz\u00f4nica or Amaz\u00f4nia; Spanish: Selva Amaz\u00f3nica, Amazon\u00eda or usually Amazonia; French: For\u00eat amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. 
This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain \""Amazonas\"" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.chainlit/config.toml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chainlit/model.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Streamlit/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Streamlit/model.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='__pycache__/model.cpython-311.pyc', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chainlit.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversession e.g/ChatBot Conversession img-1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversession e.g/ChatBot Conversession img-2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversession e.g/ChatBot Conversession img-3.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversession e.g/ChatBot Conversession img-3.png', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='conversession e.g/ChatBot Conversession vid.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='data/71763-gale-encyclopedia-of-medicine.-vol.-1.-2nd-ed.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ingest.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vectorstores/db_faiss/index.faiss', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vectorstores/db_faiss/index.pkl', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ThisIs-Developer/Llama-2-GGML-Medical-Chatbot"", ""awpbash/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot"", ""MZ786/GPT_DOCTOR"", ""comara/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot"", ""sidthegirlkid/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot"", ""saswattulo/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot"", ""Karani/Llama-2-Medical-Chatbot"", ""Paulie-Aditya/MedIntel""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-05 15:35:10+00:00"", ""cardData"": ""base_model: TheBloke/Llama-2-7B-Chat-GGML\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: question-answering\ntags:\n- medical\n- conversational\n- text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6581adf9193fb3eccded09ac"", ""modelId"": ""ThisIs-Developer/Llama-2-GGML-Medical-Chatbot"", ""usedStorage"": 48147439}",1,,0,,0,,0,,0,"Karani/Llama-2-Medical-Chatbot, MZ786/GPT_DOCTOR, Paulie-Aditya/MedIntel, ThisIs-Developer/Llama-2-GGML-Medical-Chatbot, awpbash/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot, comara/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot, 
huggingface/InferenceSupport/discussions/new?title=ThisIs-Developer/Llama-2-GGML-Medical-Chatbot&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BThisIs-Developer%2FLlama-2-GGML-Medical-Chatbot%5D(%2FThisIs-Developer%2FLlama-2-GGML-Medical-Chatbot)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, saswattulo/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot, sidthegirlkid/ThisIs-Developer-Llama-2-GGML-Medical-Chatbot",9
|
| 364 |
+
nik-55/youtube-question-answer,"---
|
| 365 |
+
language:
|
| 366 |
+
- en
|
| 367 |
+
base_model: ""TheBloke/Llama-2-7B-Chat-GGML""
|
| 368 |
+
---
|
| 369 |
+
|
| 370 |
+
# YOUTUBE Question Answer
|
| 371 |
+
|
| 372 |
+
","{""id"": ""nik-55/youtube-question-answer"", ""author"": ""nik-55"", ""sha"": ""fd72e50d5d3a2556bdf8bc40a386a3540ba0b59c"", ""last_modified"": ""2023-12-22 10:04:52+00:00"", ""created_at"": ""2023-12-22 09:50:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""base_model:TheBloke/Llama-2-7B-Chat-GGML"", ""base_model:finetune:TheBloke/Llama-2-7B-Chat-GGML"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: TheBloke/Llama-2-7B-Chat-GGML\nlanguage:\n- en"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-22 10:04:52+00:00"", ""cardData"": ""base_model: TheBloke/Llama-2-7B-Chat-GGML\nlanguage:\n- en"", ""transformersInfo"": null, ""_id"": ""65855bd189bb78d10455503e"", ""modelId"": ""nik-55/youtube-question-answer"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=nik-55/youtube-question-answer&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnik-55%2Fyoutube-question-answer%5D(%2Fnik-55%2Fyoutube-question-answer)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Llama2-Chinese-7b-Chat_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
FlagAlpha/Llama2-Chinese-7b-Chat,"---
|
| 3 |
+
developers: [https://huggingface.co/FlagAlphaAI]
|
| 4 |
+
license: apache-2.0
|
| 5 |
+
language:
|
| 6 |
+
- zh
|
| 7 |
+
- en
|
| 8 |
+
pipeline_tag: question-answering
|
| 9 |
+
library_name: transformers
|
| 10 |
+
---
|
| 11 |
+
# Llama2中文社区
|
| 12 |
+
|
| 13 |
+
---
|
| 14 |
+
## Llama2中文微调参数
|
| 15 |
+
由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-7b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。
|
| 16 |
+
|
| 17 |
+
🎯 **该版本为LoRA中文微调参数FlagAlpha/Llama2-Chinese-7b-Chat-LoRA和meta-llama/Llama-2-7b-chat-hf参数结合后的版本,可直接使用**
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
## 🚀 社区地址:
|
| 23 |
+
|
| 24 |
+
Github:[**Llama-Chinese**](https://github.com/LlamaFamily/Llama-Chinese)
|
| 25 |
+
|
| 26 |
+
在线体验链接:[**llama.family**](https://llama.family/)
|
| 27 |
+
|
| 28 |
+
## 🔥 社区介绍
|
| 29 |
+
欢迎来到Llama2中文社区!
|
| 30 |
+
|
| 31 |
+
我们是一个专注于Llama2模型在中文方面的优化和上层建设的高级技术社区。
|
| 32 |
+
|
| 33 |
+
**基于大规模中文数据,从预训练开始对Llama2模型进行中文能力的持续迭代升级**。
|
| 34 |
+
|
| 35 |
+
我们热忱欢迎对大模型LLM充满热情的开发者和研究者加入我们的行列。
|
| 36 |
+
|
| 37 |
+
## 🐼 社区资源
|
| 38 |
+
- Llama2在线体验链接[**llama.family**](https://llama.family/),同时包含Meta原版和中文微调版本!
|
| 39 |
+
- Llama2 Chat模型的[中文问答能力评测](https://github.com/LlamaFamily/Llama-Chinese/tree/main#-%E6%A8%A1%E5%9E%8B%E8%AF%84%E6%B5%8B)!
|
| 40 |
+
- [社区飞书知识库](https://chinesellama.feishu.cn/wiki/space/7257824476874768388?ccm_open_type=lark_wiki_spaceLink),欢迎大家一起共建!
|
| 41 |
+
|
| 42 |
+
","{""id"": ""FlagAlpha/Llama2-Chinese-7b-Chat"", ""author"": ""FlagAlpha"", ""sha"": ""9c1693247d2d1f99807b83b5dc817d700a3f2fa5"", ""last_modified"": ""2024-02-23 11:02:23+00:00"", ""created_at"": ""2023-07-23 10:12:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 624, ""downloads_all_time"": null, ""likes"": 221, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""question-answering"", ""zh"", ""en"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: question-answering\ndevelopers:\n- https://huggingface.co/FlagAlphaAI"", ""widget_data"": [{""text"": ""\u6211\u4f4f\u5728\u54ea\u91cc\uff1f"", ""context"": ""\u6211\u53eb\u6c83\u5c14\u592b\u5188\uff0c\u6211\u4f4f\u5728\u67cf\u6797\u3002""}, {""text"": ""\u6211\u4f4f\u5728\u54ea\u91cc\uff1f"", ""context"": ""\u6211\u53eb\u8428\u62c9\uff0c\u6211\u4f4f\u5728\u4f26\u6566\u3002""}, {""text"": ""\u6211\u7684\u540d\u5b57\u662f\u4ec0\u4e48\uff1f"", ""context"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u4f4f\u5728\u4f2f\u514b\u5229\u3002""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": 
false, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""DawnMagnet/FlagAlpha-Llama2-Chinese-7b-Chat"", ""wzszwj2018/FlagAlpha-Llama2-Chinese-7b-Chat"", ""AhmedMagdy7/FlagAlpha-Llama2-Chinese-7b-Chat"", ""epker/FlagAlpha-Llama2-Chinese-7b-Chat"", ""suan123/FlagAlpha-Llama2-Chinese-7b-Chat"", ""maidong/FlagAlpha-Llama2-Chinese-7b-Chat"", ""yiju2313/FlagAlpha-Llama2-Chinese-7b-Chat""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-23 11:02:23+00:00"", ""cardData"": ""language:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: question-answering\ndevelopers:\n- https://huggingface.co/FlagAlphaAI"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64bcfd05565b827f7ea47e07"", ""modelId"": ""FlagAlpha/Llama2-Chinese-7b-Chat"", 
""usedStorage"": 26954330508}",0,,0,"https://huggingface.co/PengceWang/LLAMA2-Chinese-huma_emotion, https://huggingface.co/PengceWang/llama2-Chinese-human_emotion, https://huggingface.co/Trace2333/dian_r64_ep7, https://huggingface.co/AI4Bread/Eyedoctor, https://huggingface.co/fukadacat/FlagAlpha-Llama2-Chinese-7bnew-Chat_qlora",5,https://huggingface.co/mradermacher/Llama2-Chinese-7b-Chat-GGUF,1,"https://huggingface.co/Abin7/bangla-chinese, https://huggingface.co/Abin7/bangla-chinese-romania, https://huggingface.co/Abin7/bangla-chinese-romania-hindi",3,"AhmedMagdy7/FlagAlpha-Llama2-Chinese-7b-Chat, DawnMagnet/FlagAlpha-Llama2-Chinese-7b-Chat, epker/FlagAlpha-Llama2-Chinese-7b-Chat, huggingface/InferenceSupport/discussions/new?title=FlagAlpha/Llama2-Chinese-7b-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BFlagAlpha%2FLlama2-Chinese-7b-Chat%5D(%2FFlagAlpha%2FLlama2-Chinese-7b-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, maidong/FlagAlpha-Llama2-Chinese-7b-Chat, suan123/FlagAlpha-Llama2-Chinese-7b-Chat, wzszwj2018/FlagAlpha-Llama2-Chinese-7b-Chat, yiju2313/FlagAlpha-Llama2-Chinese-7b-Chat",8
|
Llasa-3B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
HKUSTAudio/Llasa-3B,"---
|
| 3 |
+
license: cc-by-nc-4.0
|
| 4 |
+
language:
|
| 5 |
+
- zh
|
| 6 |
+
- en
|
| 7 |
+
base_model:
|
| 8 |
+
- meta-llama/Llama-3.2-3B-Instruct
|
| 9 |
+
tags:
|
| 10 |
+
- Text-to-Speech
|
| 11 |
+
pipeline_tag: text-to-speech
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
[](https://arxiv.org/abs/2502.04128)
|
| 15 |
+
|
| 16 |
+
**Update (2025-02-13):** Add [Llasa finetune instruction](https://github.com/zhenye234/LLaSA_training/tree/main/finetune).
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
**Update (2025-02-07):** Our paper has been released!
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
LLaSA: Scaling Train-Time and Inference-Time Compute for LLaMA-based Speech Synthesis
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
- **Train from Scratch**: If you want to train the model from scratch, use the [LLaSA Training Repository](https://github.com/zhenye234/LLaSA_training).
|
| 26 |
+
|
| 27 |
+
- **Scale for Test-Time Computation**: If you want to experiment with scaling for test-time computation, use the [LLaSA Testing Repository](https://github.com/zhenye234/LLaSA_inference).
|
| 28 |
+
|
| 29 |
+
## Model Information
|
| 30 |
+
Our model, Llasa, is a text-to-speech (TTS) system that extends the text-based LLaMA (1B,3B, and 8B) language model by incorporating speech tokens from the XCodec2 codebook,
|
| 31 |
+
which contains 65,536 tokens. We trained Llasa on a dataset comprising 250,000 hours of Chinese-English speech data.
|
| 32 |
+
The model is capable of generating speech **either solely from input text or by utilizing a given speech prompt.**
|
| 33 |
+
|
| 34 |
+
The method is seamlessly compatible with the Llama framework, making training TTS similar as training LLM (convert audios into single-codebook tokens and simply view it as a special language). It opens the possiblity of existing method for compression, acceleration and finetuning for LLM to be applied.
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
## How to use
|
| 39 |
+
Install [XCodec2](https://huggingface.co/HKUSTAudio/xcodec2).
|
| 40 |
+
|
| 41 |
+
**1. Speech synthesis solely from input text**
|
| 42 |
+
```python
|
| 43 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 44 |
+
import torch
|
| 45 |
+
import soundfile as sf
|
| 46 |
+
|
| 47 |
+
llasa_3b ='HKUSTAudio/Llasa-3B'
|
| 48 |
+
|
| 49 |
+
tokenizer = AutoTokenizer.from_pretrained(llasa_3b)
|
| 50 |
+
model = AutoModelForCausalLM.from_pretrained(llasa_3b)
|
| 51 |
+
model.eval()
|
| 52 |
+
model.to('cuda')
|
| 53 |
+
|
| 54 |
+
from xcodec2.modeling_xcodec2 import XCodec2Model
|
| 55 |
+
|
| 56 |
+
model_path = ""HKUSTAudio/xcodec2""
|
| 57 |
+
|
| 58 |
+
Codec_model = XCodec2Model.from_pretrained(model_path)
|
| 59 |
+
Codec_model.eval().cuda()
|
| 60 |
+
|
| 61 |
+
input_text = 'Dealing with family secrets is never easy. Yet, sometimes, omission is a form of protection, intending to safeguard some from the harsh truths. One day, I hope you understand the reasons behind my actions. Until then, Anna, please, bear with me.'
|
| 62 |
+
# input_text = '突然,身边一阵笑声。我看着他们,意气风发地挺直了胸膛,甩了甩那稍显肉感的双臂,轻笑道:""我身上的肉,是为了掩饰我爆棚的魅力,否则,岂不吓坏了你们呢?""'
|
| 63 |
+
def ids_to_speech_tokens(speech_ids):
|
| 64 |
+
|
| 65 |
+
speech_tokens_str = []
|
| 66 |
+
for speech_id in speech_ids:
|
| 67 |
+
speech_tokens_str.append(f""<|s_{speech_id}|>"")
|
| 68 |
+
return speech_tokens_str
|
| 69 |
+
|
| 70 |
+
def extract_speech_ids(speech_tokens_str):
|
| 71 |
+
|
| 72 |
+
speech_ids = []
|
| 73 |
+
for token_str in speech_tokens_str:
|
| 74 |
+
if token_str.startswith('<|s_') and token_str.endswith('|>'):
|
| 75 |
+
num_str = token_str[4:-2]
|
| 76 |
+
|
| 77 |
+
num = int(num_str)
|
| 78 |
+
speech_ids.append(num)
|
| 79 |
+
else:
|
| 80 |
+
print(f""Unexpected token: {token_str}"")
|
| 81 |
+
return speech_ids
|
| 82 |
+
|
| 83 |
+
#TTS start!
|
| 84 |
+
with torch.no_grad():
|
| 85 |
+
|
| 86 |
+
formatted_text = f""<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>""
|
| 87 |
+
|
| 88 |
+
# Tokenize the text
|
| 89 |
+
chat = [
|
| 90 |
+
{""role"": ""user"", ""content"": ""Convert the text to speech:"" + formatted_text},
|
| 91 |
+
{""role"": ""assistant"", ""content"": ""<|SPEECH_GENERATION_START|>""}
|
| 92 |
+
]
|
| 93 |
+
|
| 94 |
+
input_ids = tokenizer.apply_chat_template(
|
| 95 |
+
chat,
|
| 96 |
+
tokenize=True,
|
| 97 |
+
return_tensors='pt',
|
| 98 |
+
continue_final_message=True
|
| 99 |
+
)
|
| 100 |
+
input_ids = input_ids.to('cuda')
|
| 101 |
+
speech_end_id = tokenizer.convert_tokens_to_ids('<|SPEECH_GENERATION_END|>')
|
| 102 |
+
|
| 103 |
+
# Generate the speech autoregressively
|
| 104 |
+
outputs = model.generate(
|
| 105 |
+
input_ids,
|
| 106 |
+
max_length=2048, # We trained our model with a max length of 2048
|
| 107 |
+
eos_token_id= speech_end_id ,
|
| 108 |
+
do_sample=True,
|
| 109 |
+
top_p=1, # Adjusts the diversity of generated content
|
| 110 |
+
temperature=0.8, # Controls randomness in output
|
| 111 |
+
)
|
| 112 |
+
# Extract the speech tokens
|
| 113 |
+
generated_ids = outputs[0][input_ids.shape[1]:-1]
|
| 114 |
+
|
| 115 |
+
speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
|
| 116 |
+
|
| 117 |
+
# Convert token <|s_23456|> to int 23456
|
| 118 |
+
speech_tokens = extract_speech_ids(speech_tokens)
|
| 119 |
+
|
| 120 |
+
speech_tokens = torch.tensor(speech_tokens).cuda().unsqueeze(0).unsqueeze(0)
|
| 121 |
+
|
| 122 |
+
# Decode the speech tokens to speech waveform
|
| 123 |
+
gen_wav = Codec_model.decode_code(speech_tokens)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
sf.write(""gen.wav"", gen_wav[0, 0, :].cpu().numpy(), 16000)
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
**2. Speech synthesis utilizing a given speech prompt**
|
| 130 |
+
|
| 131 |
+
```python
|
| 132 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 133 |
+
import torch
|
| 134 |
+
import soundfile as sf
|
| 135 |
+
|
| 136 |
+
llasa_3b ='HKUSTAudio/Llasa-3B'
|
| 137 |
+
|
| 138 |
+
tokenizer = AutoTokenizer.from_pretrained(llasa_3b)
|
| 139 |
+
model = AutoModelForCausalLM.from_pretrained(llasa_3b)
|
| 140 |
+
model.eval()
|
| 141 |
+
model.to('cuda')
|
| 142 |
+
|
| 143 |
+
from xcodec2.modeling_xcodec2 import XCodec2Model
|
| 144 |
+
|
| 145 |
+
model_path = ""HKUSTAudio/xcodec2""
|
| 146 |
+
|
| 147 |
+
Codec_model = XCodec2Model.from_pretrained(model_path)
|
| 148 |
+
Codec_model.eval().cuda()
|
| 149 |
+
# only 16khz speech support!
|
| 150 |
+
prompt_wav, sr = sf.read(""太乙真人.wav"") # you can find wav in Files
|
| 151 |
+
#prompt_wav, sr = sf.read(""Anna.wav"") # English prompt
|
| 152 |
+
prompt_wav = torch.from_numpy(prompt_wav).float().unsqueeze(0)
|
| 153 |
+
|
| 154 |
+
prompt_text =""对,这就是我万人敬仰的太乙真人,虽然有点婴儿肥,但也掩不住我逼人的帅气。""
|
| 155 |
+
#promt_text = ""A chance to leave him alone, but... No. She just wanted to see him again. Anna, you don't know how it feels to lose a sister. Anna, I'm sorry, but your father asked me not to tell you anything.""
|
| 156 |
+
target_text = '突然,身边一阵笑声。我看着他们,意气风发地挺直了胸膛,甩了甩那稍显肉感的双臂,轻笑道:""我身上的肉,是为了掩饰我爆棚的魅力,否则,岂不吓坏了你们呢?""'
|
| 157 |
+
#target_text = ""Dealing with family secrets is never easy. Yet, sometimes, omission is a form of protection, intending to safeguard some from the harsh truths. One day, I hope you understand the reasons behind my actions. Until then, Anna, please, bear with me.""
|
| 158 |
+
input_text = prompt_text + target_text
|
| 159 |
+
|
| 160 |
+
def ids_to_speech_tokens(speech_ids):
|
| 161 |
+
|
| 162 |
+
speech_tokens_str = []
|
| 163 |
+
for speech_id in speech_ids:
|
| 164 |
+
speech_tokens_str.append(f""<|s_{speech_id}|>"")
|
| 165 |
+
return speech_tokens_str
|
| 166 |
+
|
| 167 |
+
def extract_speech_ids(speech_tokens_str):
|
| 168 |
+
|
| 169 |
+
speech_ids = []
|
| 170 |
+
for token_str in speech_tokens_str:
|
| 171 |
+
if token_str.startswith('<|s_') and token_str.endswith('|>'):
|
| 172 |
+
num_str = token_str[4:-2]
|
| 173 |
+
|
| 174 |
+
num = int(num_str)
|
| 175 |
+
speech_ids.append(num)
|
| 176 |
+
else:
|
| 177 |
+
print(f""Unexpected token: {token_str}"")
|
| 178 |
+
return speech_ids
|
| 179 |
+
|
| 180 |
+
#TTS start!
|
| 181 |
+
with torch.no_grad():
|
| 182 |
+
# Encode the prompt wav
|
| 183 |
+
vq_code_prompt = Codec_model.encode_code(input_waveform=prompt_wav)
|
| 184 |
+
print(""Prompt Vq Code Shape:"", vq_code_prompt.shape )
|
| 185 |
+
|
| 186 |
+
vq_code_prompt = vq_code_prompt[0,0,:]
|
| 187 |
+
# Convert int 12345 to token <|s_12345|>
|
| 188 |
+
speech_ids_prefix = ids_to_speech_tokens(vq_code_prompt)
|
| 189 |
+
|
| 190 |
+
formatted_text = f""<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>""
|
| 191 |
+
|
| 192 |
+
# Tokenize the text and the speech prefix
|
| 193 |
+
chat = [
|
| 194 |
+
{""role"": ""user"", ""content"": ""Convert the text to speech:"" + formatted_text},
|
| 195 |
+
{""role"": ""assistant"", ""content"": ""<|SPEECH_GENERATION_START|>"" + ''.join(speech_ids_prefix)}
|
| 196 |
+
]
|
| 197 |
+
|
| 198 |
+
input_ids = tokenizer.apply_chat_template(
|
| 199 |
+
chat,
|
| 200 |
+
tokenize=True,
|
| 201 |
+
return_tensors='pt',
|
| 202 |
+
continue_final_message=True
|
| 203 |
+
)
|
| 204 |
+
input_ids = input_ids.to('cuda')
|
| 205 |
+
speech_end_id = tokenizer.convert_tokens_to_ids('<|SPEECH_GENERATION_END|>')
|
| 206 |
+
|
| 207 |
+
# Generate the speech autoregressively
|
| 208 |
+
outputs = model.generate(
|
| 209 |
+
input_ids,
|
| 210 |
+
max_length=2048, # We trained our model with a max length of 2048
|
| 211 |
+
eos_token_id= speech_end_id ,
|
| 212 |
+
do_sample=True,
|
| 213 |
+
top_p=1,
|
| 214 |
+
temperature=0.8,
|
| 215 |
+
)
|
| 216 |
+
# Extract the speech tokens
|
| 217 |
+
generated_ids = outputs[0][input_ids.shape[1]-len(speech_ids_prefix):-1]
|
| 218 |
+
|
| 219 |
+
speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
|
| 220 |
+
|
| 221 |
+
# Convert token <|s_23456|> to int 23456
|
| 222 |
+
speech_tokens = extract_speech_ids(speech_tokens)
|
| 223 |
+
|
| 224 |
+
speech_tokens = torch.tensor(speech_tokens).cuda().unsqueeze(0).unsqueeze(0)
|
| 225 |
+
|
| 226 |
+
# Decode the speech tokens to speech waveform
|
| 227 |
+
gen_wav = Codec_model.decode_code(speech_tokens)
|
| 228 |
+
|
| 229 |
+
# if only need the generated part
|
| 230 |
+
# gen_wav = gen_wav[:,:,prompt_wav.shape[1]:]
|
| 231 |
+
|
| 232 |
+
sf.write(""gen.wav"", gen_wav[0, 0, :].cpu().numpy(), 16000)
|
| 233 |
+
```
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
## Disclaimer
|
| 237 |
+
|
| 238 |
+
This model is licensed under the CC BY-NC 4.0 License, which prohibits free commercial use because of ethics and privacy concerns; detected violations will result in legal consequences.
|
| 239 |
+
|
| 240 |
+
This codebase is strictly prohibited from being used for any illegal purposes in any country or region. Please refer to your local laws about DMCA and other related laws.","{""id"": ""HKUSTAudio/Llasa-3B"", ""author"": ""HKUSTAudio"", ""sha"": ""97267cc5eed8c915eb524331f81139077462d075"", ""last_modified"": ""2025-03-09 08:24:14+00:00"", ""created_at"": ""2025-01-07 08:20:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3114, ""downloads_all_time"": null, ""likes"": 489, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""llama"", ""Text-to-Speech"", ""text-to-speech"", ""zh"", ""en"", ""arxiv:2502.04128"", ""base_model:meta-llama/Llama-3.2-3B-Instruct"", ""base_model:finetune:meta-llama/Llama-3.2-3B-Instruct"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- meta-llama/Llama-3.2-3B-Instruct\nlanguage:\n- zh\n- en\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech\ntags:\n- Text-to-Speech"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama""}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Anna.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='\u592a\u4e59\u771f\u4eba.wav', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Pendrokar/TTS-Spaces-Arena"", ""srinivasbilla/llasa-3b-tts"", ""SunderAli17/llasa-3b-tts"", ""gorbiz/llasa-3b-tts"", ""adamnusic/llasa-3b-tts"", ""gogogaga7/HKUSTAudio-Llasa-3B"", ""setfunctionenvironment/llasa-3b-tts"", ""K2an/HKUSTAudio-Llasa-3B"", ""MoiMoi-01/llasa-3b-tts"", ""Yashowardhan/HKUSTAudio-Llasa-3B"", ""SKsizan/HKUSTAudio-Llasa-3B"", ""ADE-DANCE001/HKUSTAudio-Llasa-3B"", ""AwaisBasharat/Llasa-3B-TTS""], ""safetensors"": {""parameters"": {""BF16"": 4009454592}, ""total"": 4009454592}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-09 08:24:14+00:00"", ""cardData"": ""base_model:\n- meta-llama/Llama-3.2-3B-Instruct\nlanguage:\n- zh\n- en\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech\ntags:\n- Text-to-Speech"", ""transformersInfo"": null, ""_id"": ""677ce3daa41fc9de874529e5"", ""modelId"": ""HKUSTAudio/Llasa-3B"", ""usedStorage"": 8060170434}",0,,0,,0,"https://huggingface.co/NikolayKozloff/Llasa-3B-Q8_0-GGUF, https://huggingface.co/Zuellni/Llasa-3B-8.0bpw-h8-exl2, https://huggingface.co/AgeOfAlgorithms/Llasa-3b-GPTQ-4bit, https://huggingface.co/AgeOfAlgorithms/Llasa-3b-GPTQ-8bit, https://huggingface.co/na2tt300zx/Llasa-3B-Q4_K_M-GGUF",5,,0,"ADE-DANCE001/HKUSTAudio-Llasa-3B, K2an/HKUSTAudio-Llasa-3B, MoiMoi-01/llasa-3b-tts, Pendrokar/TTS-Spaces-Arena, SKsizan/HKUSTAudio-Llasa-3B, SunderAli17/llasa-3b-tts, Yashowardhan/HKUSTAudio-Llasa-3B, adamnusic/llasa-3b-tts, gogogaga7/HKUSTAudio-Llasa-3B, gorbiz/llasa-3b-tts, 
huggingface/InferenceSupport/discussions/new?title=HKUSTAudio/Llasa-3B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BHKUSTAudio%2FLlasa-3B%5D(%2FHKUSTAudio%2FLlasa-3B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, setfunctionenvironment/llasa-3b-tts, srinivasbilla/llasa-3b-tts",13
|
MARS5-TTS_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
CAMB-AI/MARS5-TTS,"---
|
| 3 |
+
license: agpl-3.0
|
| 4 |
+
pipeline_tag: text-to-speech
|
| 5 |
+
library_name: mars5-tts
|
| 6 |
+
tags:
|
| 7 |
+
- text-to-speech
|
| 8 |
+
- audio
|
| 9 |
+
- speech
|
| 10 |
+
- voice-cloning
|
| 11 |
+
- vc
|
| 12 |
+
- tts
|
| 13 |
+
---
|
| 14 |
+

|
| 15 |
+
|
| 16 |
+
# MARS5: A novel speech model for insane prosody.
|
| 17 |
+
|
| 18 |
+
This is the repo for the MARS5 English speech model (TTS) from CAMB.AI.
|
| 19 |
+
|
| 20 |
+
The model follows a two-stage AR-NAR pipeline with a distinctively novel NAR component (see more info in the [docs](docs/architecture.md)).
|
| 21 |
+
|
| 22 |
+
With just 5 seconds of audio and a snippet of text, MARS5 can generate speech even for prosodically hard and diverse scenarios like sports commentary, anime and more. Check out our demo:
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
https://github.com/Camb-ai/MARS5-TTS/assets/23717819/3e191508-e03c-4ff9-9b02-d73ae0ebefdd
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
**Quick links**:
|
| 33 |
+
- [CAMB.AI website](https://camb.ai/) (access MARS5 in 140+ languages for TTS and dubbing)
|
| 34 |
+
- Technical docs: [in the docs folder](docs/architecture.md)
|
| 35 |
+
- Colab quickstart: <a target=""_blank"" href=""https://colab.research.google.com/github/Camb-ai/mars5-tts/blob/master/mars5_demo.ipynb""><img src=""https://colab.research.google.com/assets/colab-badge.svg"" alt=""Open In Colab""/></a>
|
| 36 |
+
- Demo page with samples: [here](https://6b1a3a8e53ae.ngrok.app/)
|
| 37 |
+
|
| 38 |
+

|
| 39 |
+
|
| 40 |
+
**Figure**: the high-level architecture flow of Mars 5. Given text and a reference audio, coarse (L0) encodec speech features are obtained through an autoregressive transformer model. Then, the text, reference, and coarse features are refined in a multinomial DDPM model to produce the remaining encodec codebook values. The output of the DDPM is then vocoded to produce the final audio.
|
| 41 |
+
|
| 42 |
+
Because the model is trained on raw audio together with byte-pair-encoded text, it can be steered with things like punctuation and capitalization.
|
| 43 |
+
E.g. to add a pause, add a comma to that part in the transcript. Or, to emphasize a word, put it in capital letters in the transcript.
|
| 44 |
+
This enables a fairly natural way for guiding the prosody of the generated output.
|
| 45 |
+
|
| 46 |
+
Speaker identity is specified using an audio reference file between 2-12 seconds, with lengths around 6s giving optimal results.
|
| 47 |
+
Further, by providing the transcript of the reference, MARS5 enables one to do a '_deep clone_' which improves the quality of the cloning and output, at the cost of taking a bit longer to produce the audio.
|
| 48 |
+
For more details on this and other performance and model details, please see inside the [docs folder](docs/architecture.md).
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
## Quickstart
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
We use `torch.hub` to make loading the model easy -- no cloning of the repo needed. The steps to perform inference are simple:
|
| 55 |
+
|
| 56 |
+
1. **Install pip dependencies**: `huggingface_hub`, `torch`, `torchaudio`, `librosa`, `vocos`, and `encodec`. Python must be at version 3.10 or greater, and torch must be v2.0 or greater.
|
| 57 |
+
|
| 58 |
+
```bash
|
| 59 |
+
pip install --upgrade torch torchaudio librosa vocos encodec huggingface_hub
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
2. **Load models**: load the Mars 5 AR and NAR model from the huggingface hub:
|
| 63 |
+
|
| 64 |
+
```python
|
| 65 |
+
from inference import Mars5TTS, InferenceConfig as config_class
|
| 66 |
+
import librosa
|
| 67 |
+
mars5 = Mars5TTS.from_pretrained(""CAMB-AI/MARS5-TTS"")
|
| 68 |
+
# The `mars5` contains the AR and NAR model, as well as inference code.
|
| 69 |
+
# The `config_class` contains tunable inference config settings like temperature.
|
| 70 |
+
```
|
| 71 |
+
3. **Pick a reference** and optionally its transcript:
|
| 72 |
+
|
| 73 |
+
```python
|
| 74 |
+
# load reference audio between 1-12 seconds.
|
| 75 |
+
wav, sr = librosa.load('<path to arbitrary 24kHz waveform>.wav',
|
| 76 |
+
sr=mars5.sr, mono=True)
|
| 77 |
+
wav = torch.from_numpy(wav)
|
| 78 |
+
ref_transcript = ""<transcript of the reference audio>""
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
The reference transcript is an optional piece of info you need if you wish to do a deep clone.
|
| 82 |
+
Mars5 supports 2 kinds of inference: a shallow, fast inference whereby you do not need the transcript of the reference (we call this a _shallow clone_), and a second slower, but typically higher quality way, which we call a _deep clone_.
|
| 83 |
+
To use the deep clone, you need the prompt transcript. See the [model docs](docs/architecture.md) for more info on this.
|
| 84 |
+
|
| 85 |
+
4. **Perform the synthesis**:
|
| 86 |
+
|
| 87 |
+
```python
|
| 88 |
+
# Pick whether you want a deep or shallow clone. Set to False if you don't know prompt transcript or want fast inference. Set to True if you know transcript and want highest quality.
|
| 89 |
+
deep_clone = True
|
| 90 |
+
# Below you can tune other inference settings, like top_k, temperature, top_p, etc...
|
| 91 |
+
cfg = config_class(deep_clone=deep_clone, rep_penalty_window=100,
|
| 92 |
+
top_k=100, temperature=0.7, freq_penalty=3)
|
| 93 |
+
|
| 94 |
+
ar_codes, output_audio = mars5.tts(""The quick brown rat."", wav,
|
| 95 |
+
ref_transcript,
|
| 96 |
+
cfg=cfg)
|
| 97 |
+
# output_audio is (T,) shape float tensor corresponding to the 24kHz output audio.
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
That's it! These default settings provide pretty good results, but feel free to tune the inference settings to optimize the output for your particular example. See the [`InferenceConfig`](inference.py) code or the demo notebook for info and docs on all the different inference settings.
|
| 101 |
+
|
| 102 |
+
_Some tips for best quality:_
|
| 103 |
+
- Make sure reference audio is clean and between 1 second and 12 seconds.
|
| 104 |
+
- Use deep clone and provide an accurate transcript for the reference.
|
| 105 |
+
- Use proper punctuation -- the model can be guided and made better or worse with proper use of punctuation and capitalization.
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
## Model details
|
| 109 |
+
|
| 110 |
+
**Checkpoints**
|
| 111 |
+
|
| 112 |
+
The checkpoints for MARS5 are provided under the releases tab of this github repo. We provide two checkpoints:
|
| 113 |
+
|
| 114 |
+
- AR fp16 checkpoint [~750M parameters], along with config embedded in the checkpoint.
|
| 115 |
+
- NAR fp16 checkpoint [~450M parameters], along with config embedded in the checkpoint.
|
| 116 |
+
- The byte-pair encoding tokenizer used for the L0 encodec codes and the English text is embedded in each checkpoint under the `'vocab'` key, and follows roughly the same format of a saved minbpe tokenizer.
|
| 117 |
+
|
| 118 |
+
**Hardware requirements**:
|
| 119 |
+
|
| 120 |
+
You must be able to store at least 750M+450M params on GPU, and do inference with 750M of active parameters. In general, at least **20GB of GPU VRAM** is needed to run the model on GPU (we plan to further optimize this in the future).
|
| 121 |
+
|
| 122 |
+
If you do not have the necessary hardware requirements and just want to use MARS5 in your applications, you can use it via our API: see [docs.camb.ai](https://docs.camb.ai/). If you need some more credits to test it for your use case, feel free to reach out to `help@camb.ai` for help.
|
| 123 |
+
|
| 124 |
+
## Roadmap
|
| 125 |
+
|
| 126 |
+
Mars 5 is not perfect at the moment, and we are working on a few efforts to improve its quality, stability, and performance.
|
| 127 |
+
Rough areas we are looking to improve, and welcome any contributions:
|
| 128 |
+
|
| 129 |
+
- Improving inference stability and consistency
|
| 130 |
+
- Speed/performance optimizations
|
| 131 |
+
- Improving reference audio selection when given long references.
|
| 132 |
+
- Benchmark performance numbers for Mars 5 on standard speech datasets.
|
| 133 |
+
|
| 134 |
+
If you would like to contribute any improvement to MARS, please feel free to contribute (guidelines below).
|
| 135 |
+
|
| 136 |
+
## Contributions
|
| 137 |
+
|
| 138 |
+
We welcome any contributions to improving the model. As you may find when experimenting, it can produce really great results, it can still be further improved to create excellent outputs _consistently_. Please raise a PR/discussion in github.
|
| 139 |
+
|
| 140 |
+
**Contribution format**:
|
| 141 |
+
|
| 142 |
+
The preferred way to contribute to our repo is to fork the [master repository](https://github.com/Camb-ai/mars5-tts) on GitHub:
|
| 143 |
+
|
| 144 |
+
1. Fork the repo on github
|
| 145 |
+
2. Clone the repo, set upstream as this repo: `git remote add upstream git@github.com:Camb-ai/mars5-tts.git`
|
| 146 |
+
3. Make to a new local branch and make your changes, commit changes.
|
| 147 |
+
4. Push changes to new upstream branch: `git push --set-upstream origin <NAME-NEW-BRANCH>`
|
| 148 |
+
5. On github, go to your fork and click 'Pull request' to begin the PR process. Please make sure to include a description of what you did/fixed.
|
| 149 |
+
|
| 150 |
+
## License
|
| 151 |
+
|
| 152 |
+
We are open-sourcing MARS in English under GNU AGPL 3.0, but you can request to use it under a different license by emailing help@camb.ai
|
| 153 |
+
|
| 154 |
+
## Join our team
|
| 155 |
+
|
| 156 |
+
We're an ambitious team, globally distributed, with a singular aim of making everyone's voice count. At CAMB.AI, we're a research team of Interspeech-published, Carnegie Mellon, ex-Siri engineers and we're looking for you to join our team.
|
| 157 |
+
|
| 158 |
+
We're actively hiring; please drop us an email at ack@camb.ai if you're interested. Visit our [careers page](https://www.camb.ai/careers) for more info.
|
| 159 |
+
|
| 160 |
+
## Acknowledgements
|
| 161 |
+
|
| 162 |
+
Parts of code for this project are adapted from the following repositories -- please make sure to check them out! Thank you to the authors of:
|
| 163 |
+
|
| 164 |
+
- AWS: For providing much needed compute resources (NVIDIA H100s) to enable training of the model.
|
| 165 |
+
- TransFusion: [https://github.com/RF5/transfusion-asr](https://github.com/RF5/transfusion-asr)
|
| 166 |
+
- Multinomial diffusion: [https://github.com/ehoogeboom/multinomial_diffusion](https://github.com/ehoogeboom/multinomial_diffusion)
|
| 167 |
+
- Mistral-src: [https://github.com/mistralai/mistral-src](https://github.com/mistralai/mistral-src)
|
| 168 |
+
- minbpe: [https://github.com/karpathy/minbpe](https://github.com/karpathy/minbpe)
|
| 169 |
+
- gemelo-ai's encodec Vocos: [https://github.com/gemelo-ai/vocos](https://github.com/gemelo-ai/vocos)
|
| 170 |
+
- librosa for their `.trim()` code: [https://librosa.org/doc/main/generated/librosa.effects.trim.html](https://librosa.org/doc/main/generated/librosa.effects.trim.html)","{""id"": ""CAMB-AI/MARS5-TTS"", ""author"": ""CAMB-AI"", ""sha"": ""010cda8c524b55ccaabb34ac3711a16ad8830d03"", ""last_modified"": ""2024-07-05 15:24:40+00:00"", ""created_at"": ""2024-06-07 16:43:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 292, ""downloads_all_time"": null, ""likes"": 473, ""library_name"": ""mars5-tts"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""mars5-tts"", ""text-to-speech"", ""audio"", ""speech"", ""voice-cloning"", ""vc"", ""tts"", ""license:agpl-3.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: mars5-tts\nlicense: agpl-3.0\npipeline_tag: text-to-speech\ntags:\n- text-to-speech\n- audio\n- speech\n- voice-cloning\n- vc\n- tts"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/demo-preview.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/github-banner.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/architecture.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/NAR_inpainting_diagram.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/example_ref.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/intro_vid.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/mars5_AR_arch.png', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/mars5_NAR_arch.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='docs/assets/simplified_diagram.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='handler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hubconf.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inference.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/ar_generate.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/diffuser.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/minbpe/base.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/minbpe/codebook.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/minbpe/regex.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/model.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/nn_future.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/samplers.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/trim.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5/utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5_ar.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5_demo.ipynb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mars5_nar.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""CAMB-AI/mars5_space"", ""Nymbo/mars5_space"", ""agency888/AidenTTS""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-05 15:24:40+00:00"", ""cardData"": ""library_name: mars5-tts\nlicense: agpl-3.0\npipeline_tag: text-to-speech\ntags:\n- text-to-speech\n- audio\n- speech\n- voice-cloning\n- 
vc\n- tts"", ""transformersInfo"": null, ""_id"": ""666338a3921ddc2a5640cac5"", ""modelId"": ""CAMB-AI/MARS5-TTS"", ""usedStorage"": 4863727290}",0,,0,,0,,0,,0,"CAMB-AI/mars5_space, Nymbo/mars5_space, agency888/AidenTTS, huggingface/InferenceSupport/discussions/new?title=CAMB-AI/MARS5-TTS&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCAMB-AI%2FMARS5-TTS%5D(%2FCAMB-AI%2FMARS5-TTS)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",4
|
MEETING_SUMMARY_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
MN-12B-Mag-Mell-R1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
inflatebot/MN-12B-Mag-Mell-R1,"---
|
| 3 |
+
base_model:
|
| 4 |
+
- IntervitensInc/Mistral-Nemo-Base-2407-chatml
|
| 5 |
+
- nbeerbower/mistral-nemo-bophades-12B
|
| 6 |
+
- nbeerbower/mistral-nemo-wissenschaft-12B
|
| 7 |
+
- elinas/Chronos-Gold-12B-1.0
|
| 8 |
+
- Fizzarolli/MN-12b-Sunrose
|
| 9 |
+
- nbeerbower/mistral-nemo-gutenberg-12B-v4
|
| 10 |
+
- anthracite-org/magnum-12b-v2.5-kto
|
| 11 |
+
library_name: transformers
|
| 12 |
+
tags:
|
| 13 |
+
- mergekit
|
| 14 |
+
- merge
|
| 15 |
+
|
| 16 |
+
---
|
| 17 |
+

|
| 18 |
+
*[Welcome, brave one; you've come a long mile.](https://www.youtube.com/watch?v=dgGEuC1F3oE)*
|
| 19 |
+
|
| 20 |
+
# MN-12B-Mag-Mell-R1
|
| 21 |
+
|
| 22 |
+
NOTE for newer users: ""R1"" here means ""Revision 1"". This model predates DeepSeek's R1; DeepSeek inadvertently made using this versioning scheme very annoying!
|
| 23 |
+
|
| 24 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 25 |
+
|
| 26 |
+
[Official Q4_K_M, Q6_K and Q_8 GGUFs by me](https://huggingface.co/inflatebot/MN-12B-Mag-Mell-R1-GGUF)
|
| 27 |
+
|
| 28 |
+
[More available from mradermacher](https://huggingface.co/mradermacher/MN-12B-Mag-Mell-R1-GGUF/tree/main)
|
| 29 |
+
|
| 30 |
+
[Official EXL2 by toastypigeon](https://huggingface.co/Alfitaria/MN-12B-Mag-Mell-R1-exl2)
|
| 31 |
+
|
| 32 |
+
## Usage Details
|
| 33 |
+
|
| 34 |
+
### Sampler Settings
|
| 35 |
+
Mag Mell R1 was tested with Temp 1.25 and MinP 0.2. This was fairly stable up to 10K, but this might be too ""hot"".
|
| 36 |
+
If issues with coherency occur, try *in*creasing MinP or *de*creasing Temperature.
|
| 37 |
+
|
| 38 |
+
Other samplers shouldn't be necessary. XTC was shown to break outputs. DRY should be okay if used sparingly. Other penalty-type samplers should probably be avoided.
|
| 39 |
+
|
| 40 |
+
### Formatting
|
| 41 |
+
The base model for Mag Mell is [Mistral-Nemo-Base-2407-chatml](https://huggingface.co/IntervitensInc/Mistral-Nemo-Base-2407-chatml), and as such ChatML formatting is recommended.
|
| 42 |
+
|
| 43 |
+
Early testing versions had a tendency to leak tokens, but this should be more or less hammered out. It recently (12-18-2024) came to our attention that Cache Quantization may either cause or exacerbate this issue.
|
| 44 |
+
|
| 45 |
+
## Merge Details
|
| 46 |
+
Mag Mell is a multi-stage merge, inspired by hyper-merges like [Tiefighter](https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter) and [Umbral Mind](https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v2.0-8B).
|
| 47 |
+
Intended to be a general purpose ""Best of Nemo"" model for any fictional, creative use case.
|
| 48 |
+
|
| 49 |
+
6 models were chosen based on 3 categories; they were then paired up and merged via layer-weighted SLERP to create intermediate ""specialists"" which are then evaluated in their domain.
|
| 50 |
+
The specialists were then merged into the base via DARE-TIES, with hyperparameters chosen to reduce interference caused by the overlap of the three domains.
|
| 51 |
+
The idea with this approach is to extract the best qualities of each component part, and produce models whose task vectors represent more than the sum of their parts.
|
| 52 |
+
|
| 53 |
+
The three specialists are as follows:
|
| 54 |
+
|
| 55 |
+
- Hero (RP, kink/trope coverage): [Chronos Gold](https://huggingface.co/elinas/Chronos-Gold-12B-1.0), [Sunrose](https://huggingface.co/Fizzarolli/MN-12b-Sunrose).
|
| 56 |
+
|
| 57 |
+
- Monk (Intelligence, groundedness): [Bophades](https://huggingface.co/nbeerbower/mistral-nemo-bophades-12B), [Wissenschaft](https://huggingface.co/nbeerbower/mistral-nemo-wissenschaft-12B).
|
| 58 |
+
|
| 59 |
+
- Deity (Prose, flair): [Gutenberg v4](https://huggingface.co/nbeerbower/mistral-nemo-gutenberg-12B-v4), [Magnum 2.5 KTO](https://huggingface.co/anthracite-org/magnum-v2.5-12b-kto).
|
| 60 |
+
|
| 61 |
+
I've been dreaming about this merge since Nemo tunes started coming out in earnest. From our testing, Mag Mell demonstrates worldbuilding capabilities unlike any model in its class, comparable to old adventuring models like Tiefighter, and prose that exhibits minimal ""slop"" (not bad for no finetuning), frequently devising electrifying metaphors that left us consistently astonished.
|
| 62 |
+
|
| 63 |
+
I don't want to toot my own bugle though; I'm really proud of how this came out, but please leave your feedback, good or bad.
|
| 64 |
+
|
| 65 |
+
Special thanks as usual to Toaster for his feedback and Fizz for helping fund compute, as well as the KoboldAI Discord for their resources.
|
| 66 |
+
|
| 67 |
+
### Merge Method
|
| 68 |
+
|
| 69 |
+
This model was merged using the [DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708) merge method using [IntervitensInc/Mistral-Nemo-Base-2407-chatml](https://huggingface.co/IntervitensInc/Mistral-Nemo-Base-2407-chatml) as a base.
|
| 70 |
+
|
| 71 |
+
### Models Merged
|
| 72 |
+
|
| 73 |
+
The following models were included in the merge:
|
| 74 |
+
* IntervitensInc/Mistral-Nemo-Base-2407-chatml
|
| 75 |
+
* nbeerbower/mistral-nemo-bophades-12B
|
| 76 |
+
* nbeerbower/mistral-nemo-wissenschaft-12B
|
| 77 |
+
* elinas/Chronos-Gold-12B-1.0
|
| 78 |
+
* Fizzarolli/MN-12b-Sunrose
|
| 79 |
+
* nbeerbower/mistral-nemo-gutenberg-12B-v4
|
| 80 |
+
* anthracite-org/magnum-12b-v2.5-kto
|
| 81 |
+
|
| 82 |
+
### Configuration
|
| 83 |
+
|
| 84 |
+
The following YAML configurations were used to produce this model:
|
| 85 |
+
|
| 86 |
+
#### Monk:
|
| 87 |
+
```yaml
|
| 88 |
+
models:
|
| 89 |
+
- model: nbeerbower/mistral-nemo-bophades-12B
|
| 90 |
+
- model: nbeerbower/mistral-nemo-wissenschaft-12B
|
| 91 |
+
merge_method: slerp
|
| 92 |
+
base_model: nbeerbower/mistral-nemo-bophades-12B
|
| 93 |
+
parameters:
|
| 94 |
+
t: [0.1, 0.2, 0.4, 0.6, 0.6, 0.4, 0.2, 0.1]
|
| 95 |
+
dtype: bfloat16
|
| 96 |
+
tokenizer_source: base
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
#### Hero:
|
| 100 |
+
```yaml
|
| 101 |
+
models:
|
| 102 |
+
- model: elinas/Chronos-Gold-12B-1.0
|
| 103 |
+
- model: Fizzarolli/MN-12b-Sunrose
|
| 104 |
+
merge_method: slerp
|
| 105 |
+
base_model: elinas/Chronos-Gold-12B-1.0
|
| 106 |
+
parameters:
|
| 107 |
+
t: [0.1, 0.2, 0.4, 0.6, 0.6, 0.4, 0.2, 0.1]
|
| 108 |
+
dtype: bfloat16
|
| 109 |
+
tokenizer_source: base
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
#### Deity:
|
| 113 |
+
```yaml
|
| 114 |
+
models:
|
| 115 |
+
- model: nbeerbower/mistral-nemo-gutenberg-12B-v4
|
| 116 |
+
- model: anthracite-org/magnum-12b-v2.5-kto
|
| 117 |
+
merge_method: slerp
|
| 118 |
+
base_model: nbeerbower/mistral-nemo-gutenberg-12B-v4
|
| 119 |
+
parameters:
|
| 120 |
+
t: [0, 0.1, 0.2, 0.25, 0.25, 0.2, 0.1, 0]
|
| 121 |
+
dtype: bfloat16
|
| 122 |
+
tokenizer_source: base
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
#### Mag Mell:
|
| 126 |
+
```yaml
|
| 127 |
+
models:
|
| 128 |
+
- model: monk
|
| 129 |
+
parameters:
|
| 130 |
+
density: 0.7
|
| 131 |
+
weight: 0.5
|
| 132 |
+
- model: hero
|
| 133 |
+
parameters:
|
| 134 |
+
density: 0.9
|
| 135 |
+
weight: 1
|
| 136 |
+
- model: deity
|
| 137 |
+
parameters:
|
| 138 |
+
density: 0.5
|
| 139 |
+
weight: 0.7
|
| 140 |
+
merge_method: dare_ties
|
| 141 |
+
base_model: IntervitensInc/Mistral-Nemo-Base-2407-chatml
|
| 142 |
+
tokenizer_source: base
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
`In Irish mythology, Mag Mell (modern spelling: Magh Meall, meaning 'delightful plain') is one of the names for the Celtic Otherworld, a mythical realm achievable through death and/or glory... Never explicitly stated in any surviving mythological account to be an afterlife; rather, it is usually portrayed as a paradise populated by deities, which is occasionally visited by some adventurous mortals. In its island guise, it was visited by various legendary Irish heroes and monks, forming the basis of the adventure myth or echtrae...`","{""id"": ""inflatebot/MN-12B-Mag-Mell-R1"", ""author"": ""inflatebot"", ""sha"": ""b5f9f348c2e43dc8c862b2d4ab1521e3256696fc"", ""last_modified"": ""2025-04-03 00:43:44+00:00"", ""created_at"": ""2024-09-16 00:38:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3035, ""downloads_all_time"": null, ""likes"": 159, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""arxiv:2311.03099"", ""arxiv:2306.01708"", ""base_model:Fizzarolli/MN-12b-Sunrose"", ""base_model:merge:Fizzarolli/MN-12b-Sunrose"", ""base_model:IntervitensInc/Mistral-Nemo-Base-2407-chatml"", ""base_model:merge:IntervitensInc/Mistral-Nemo-Base-2407-chatml"", ""base_model:anthracite-org/magnum-v2.5-12b-kto"", ""base_model:merge:anthracite-org/magnum-v2.5-12b-kto"", ""base_model:elinas/Chronos-Gold-12B-1.0"", ""base_model:merge:elinas/Chronos-Gold-12B-1.0"", ""base_model:nbeerbower/mistral-nemo-bophades-12B"", ""base_model:merge:nbeerbower/mistral-nemo-bophades-12B"", ""base_model:nbeerbower/mistral-nemo-gutenberg-12B-v4"", ""base_model:merge:nbeerbower/mistral-nemo-gutenberg-12B-v4"", ""base_model:nbeerbower/mistral-nemo-wissenschaft-12B"", ""base_model:merge:nbeerbower/mistral-nemo-wissenschaft-12B"", ""autotrain_compatible"", ""text-generation-inference"", 
""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- IntervitensInc/Mistral-Nemo-Base-2407-chatml\n- nbeerbower/mistral-nemo-bophades-12B\n- nbeerbower/mistral-nemo-wissenschaft-12B\n- elinas/Chronos-Gold-12B-1.0\n- Fizzarolli/MN-12b-Sunrose\n- nbeerbower/mistral-nemo-gutenberg-12B-v4\n- anthracite-org/magnum-12b-v2.5-kto\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='magmell.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""SC999/NV_Nemotron"", ""JackHoltone/try-this-model"", ""k11112/try-this-model"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 12247782400}, ""total"": 12247782400}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-03 00:43:44+00:00"", ""cardData"": ""base_model:\n- IntervitensInc/Mistral-Nemo-Base-2407-chatml\n- nbeerbower/mistral-nemo-bophades-12B\n- nbeerbower/mistral-nemo-wissenschaft-12B\n- elinas/Chronos-Gold-12B-1.0\n- Fizzarolli/MN-12b-Sunrose\n- nbeerbower/mistral-nemo-gutenberg-12B-v4\n- anthracite-org/magnum-12b-v2.5-kto\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e77e198aa82e56feb0bfef"", ""modelId"": ""inflatebot/MN-12B-Mag-Mell-R1"", ""usedStorage"": 24497618465}",0,"https://huggingface.co/Frowning/Mag-Mell-R1-21B, 
https://huggingface.co/SubMaroon/MN-12B-Mag-Mell-R1-SODOM-v1",2,"https://huggingface.co/Alfitaria/mn-inf-qlora-mm, https://huggingface.co/SubMaroon/120-Days-Of-QLoRa, https://huggingface.co/Nitrals-Loras/MM-t1-lora",3,"https://huggingface.co/inflatebot/MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/mradermacher/MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q6_K-GGUF, https://huggingface.co/mradermacher/MN-12B-Mag-Mell-R1-i1-GGUF, https://huggingface.co/Alfitaria/MN-12B-Mag-Mell-R1-exl2, https://huggingface.co/Lewdiculous/MN-12B-Mag-Mell-R1-GGUF-IQ-ARM-Imatrix, https://huggingface.co/bartowski/MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q4_K_S-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q4_K_M-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q5_K_S-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q5_K_M-GGUF, https://huggingface.co/Triangle104/MN-12B-Mag-Mell-R1-Q8_0-GGUF, https://huggingface.co/featherless-ai-quants/inflatebot-MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/tensorblock/MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/UniLLMer/MagMelKaa, https://huggingface.co/Rivaidan/MN-12B-Mag-Mell-R1-Q8_0-GGUF, https://huggingface.co/DevQuasar/inflatebot.MN-12B-Mag-Mell-R1-GGUF, https://huggingface.co/janboe91/MN-12B-Mag-Mell-R1-Q8-mlx, https://huggingface.co/janboe91/MN-12B-Mag-Mell-R1-Q6-mlx, https://huggingface.co/huggingkot/MN-12B-Mag-Mell-R1-q4f16_1-MLC, https://huggingface.co/daupaloffer/MN-12B-Mag-Mell-R1-mlx-4Bit",21,"https://huggingface.co/yamatazen/Twilight-SCE-12B-v2, https://huggingface.co/grimjim/MagSoup-v1-12B, https://huggingface.co/AIvel/AnotherOne-Unslop-Mell-12B, https://huggingface.co/yamatazen/Twilight-SCE-12B, https://huggingface.co/redrix/patricide-12B-Unslop-Mell, https://huggingface.co/redrix/AngelSlayer-12B-Unslop-Mell-RPMax-DARKNESS, https://huggingface.co/redrix/nepoticide-12B-Unslop-Unleashed-Mell-RPMax-v2, 
https://huggingface.co/redrix/patricide-12B-Unslop-Mell-v2, https://huggingface.co/redrix/fratricide-12B-Unslop-Mell-DARKNESS, https://huggingface.co/redrix/AngelSlayer-12B-Unslop-Mell-RPMax-DARKNESS-v2, https://huggingface.co/redrix/AngelSlayer-12B-Unslop-Mell-RPMax-DARKNESS-v3, https://huggingface.co/redrix/wuriaee-12B-schizostock, https://huggingface.co/redrix/sororicide-12B-Farer-Mell-Unslop, https://huggingface.co/DoppelReflEx/MN-12B-Mimicore-GreenSnake, https://huggingface.co/KatyTheCutie/Repose-12B, https://huggingface.co/Skarmorie/Mag-Mell-RU-035, https://huggingface.co/Aleteian/On-the-Strange-Lands-MN-12B, https://huggingface.co/mergekit-community/MN-Sappho-c-12B, https://huggingface.co/KatyTheCutie/Repose-V2-A2, https://huggingface.co/KatyTheCutie/Repose-V2-6O, https://huggingface.co/KatyTheCutie/Repose-V2-2B, https://huggingface.co/yamatazen/Ayla-Light-12B-v2, https://huggingface.co/Uncanned/Cammell-Twilight-v0.2-12B, https://huggingface.co/mergekit-community/MN-Sappho-g-12B, https://huggingface.co/mergekit-community/MN-Sappho-g2-12B, https://huggingface.co/yamatazen/EtherealLight-12B, https://huggingface.co/mergekit-community/MN-Sappho-k-12B, https://huggingface.co/yamatazen/Eris-Light-12B, https://huggingface.co/mergekit-community/MN-Sappho-l-12B, https://huggingface.co/mergekit-community/MN-Sappho-g3-12B, https://huggingface.co/yamatazen/Aurora-SCE-12B, https://huggingface.co/yamatazen/Aurora-SCE-12B-v2, https://huggingface.co/mergekit-community/MN-Sappho-n-12B, https://huggingface.co/mergekit-community/MN-Sappho-n2-12B, https://huggingface.co/yamatazen/EtherealMoon-12B, https://huggingface.co/yamatazen/NightWind-12B, https://huggingface.co/yamatazen/ElvenMaid-12B, https://huggingface.co/yamatazen/ElvenMaid-12B-v2, https://huggingface.co/grimjim/MagnaMellRei-v1-12B, https://huggingface.co/yamatazen/BlueLight-12B, https://huggingface.co/yamatazen/StarrySky-12B, https://huggingface.co/grimjim/Magnolia-v8-12B, 
https://huggingface.co/grimjim/Magnolia-v9-12B, https://huggingface.co/Khetterman/AbominationScience-12B-v4, https://huggingface.co/redrix/nepoticide-12B-Unslop-Unleashed-Mell-RPMax, https://huggingface.co/GenericMatter/ArliArli-Meg, https://huggingface.co/mergekit-community/mergekit-ties-ykqemwr, https://huggingface.co/Skarmorie/MagMell-GnR-Roc-12b, https://huggingface.co/mergekit-community/MN-Sappho-h-12B, https://huggingface.co/mergekit-community/MN-Sappho-i-12B, https://huggingface.co/mergekit-community/MN-Sappho-n3-12B, https://huggingface.co/mergekit-community/UnslopNemo-Mag-Mell_T-1, https://huggingface.co/ChaoticNeutrals/Community_Request-02-12B, https://huggingface.co/dutti/Ascal-rt.11, https://huggingface.co/Nitral-Archive/Violet_MagCap-Rebase-12B, https://huggingface.co/TomoDG/EtherealAurora-MN-Nemo-12B",56,"FallnAI/Quantize-HF-Models, JackHoltone/try-this-model, K00B404/LLM_Quantization, KBaba7/Quant, SC999/NV_Nemotron, bhaskartripathi/LLM_Quantization, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/118, k11112/try-this-model, ruslanmv/convert_to_gguf, totolook/Quant",11
|
| 147 |
+
Frowning/Mag-Mell-R1-21B,"---
|
| 148 |
+
base_model:
|
| 149 |
+
- inflatebot/MN-12B-Mag-Mell-R1
|
| 150 |
+
library_name: transformers
|
| 151 |
+
tags:
|
| 152 |
+
- mergekit
|
| 153 |
+
- merge
|
| 154 |
+
|
| 155 |
+
---
|
| 156 |
+
# merge
|
| 157 |
+
|
| 158 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 159 |
+
|
| 160 |
+
## Merge Details
|
| 161 |
+
### Merge Method
|
| 162 |
+
|
| 163 |
+
This model was merged using the passthrough merge method.
|
| 164 |
+
|
| 165 |
+
### Models Merged
|
| 166 |
+
|
| 167 |
+
The following models were included in the merge:
|
| 168 |
+
* [inflatebot/MN-12B-Mag-Mell-R1](https://huggingface.co/inflatebot/MN-12B-Mag-Mell-R1)
|
| 169 |
+
|
| 170 |
+
### Configuration
|
| 171 |
+
|
| 172 |
+
The following YAML configuration was used to produce this model:
|
| 173 |
+
|
| 174 |
+
```yaml
|
| 175 |
+
dtype: bfloat16
|
| 176 |
+
merge_method: passthrough
|
| 177 |
+
slices:
|
| 178 |
+
- sources:
|
| 179 |
+
- layer_range: [0, 30]
|
| 180 |
+
model: inflatebot/MN-12B-Mag-Mell-R1
|
| 181 |
+
- sources:
|
| 182 |
+
- layer_range: [16, 32]
|
| 183 |
+
model: inflatebot/MN-12B-Mag-Mell-R1
|
| 184 |
+
parameters:
|
| 185 |
+
scale:
|
| 186 |
+
- filter: o_proj
|
| 187 |
+
value: 0.0
|
| 188 |
+
- filter: down_proj
|
| 189 |
+
value: 0.0
|
| 190 |
+
- value: 1.0
|
| 191 |
+
- sources:
|
| 192 |
+
- layer_range: [16, 32]
|
| 193 |
+
model: inflatebot/MN-12B-Mag-Mell-R1
|
| 194 |
+
parameters:
|
| 195 |
+
scale:
|
| 196 |
+
- filter: o_proj
|
| 197 |
+
value: 0.0
|
| 198 |
+
- filter: down_proj
|
| 199 |
+
value: 0.0
|
| 200 |
+
- value: 1.0
|
| 201 |
+
- sources:
|
| 202 |
+
- layer_range: [32, 40]
|
| 203 |
+
model: inflatebot/MN-12B-Mag-Mell-R1
|
| 204 |
+
|
| 205 |
+
```
|
| 206 |
+
","{""id"": ""Frowning/Mag-Mell-R1-21B"", ""author"": ""Frowning"", ""sha"": ""291fc3ae0daa324c02549964dfd3e35e759ec04b"", ""last_modified"": ""2024-12-10 05:09:25+00:00"", ""created_at"": ""2024-12-10 05:02:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 27, ""downloads_all_time"": null, ""likes"": 4, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""base_model:inflatebot/MN-12B-Mag-Mell-R1"", ""base_model:finetune:inflatebot/MN-12B-Mag-Mell-R1"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- inflatebot/MN-12B-Mag-Mell-R1\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 20426982400}, ""total"": 20426982400}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-10 05:09:25+00:00"", ""cardData"": ""base_model:\n- inflatebot/MN-12B-Mag-Mell-R1\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": 
""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6757cb7ea5e246162dfa446b"", ""modelId"": ""Frowning/Mag-Mell-R1-21B"", ""usedStorage"": 40871116896}",1,,0,,0,"https://huggingface.co/mradermacher/Mag-Mell-R1-21B-GGUF, https://huggingface.co/mradermacher/Mag-Mell-R1-21B-i1-GGUF, https://huggingface.co/Frowning/Mag-Mell-R1-21B-Q5_K_M-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=Frowning/Mag-Mell-R1-21B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BFrowning%2FMag-Mell-R1-21B%5D(%2FFrowning%2FMag-Mell-R1-21B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 207 |
+
SubMaroon/MN-12B-Mag-Mell-R1-SODOM-v1,"---
|
| 208 |
+
library_name: transformers
|
| 209 |
+
tags:
|
| 210 |
+
- not-for-all-audiences
|
| 211 |
+
base_model:
|
| 212 |
+
- inflatebot/MN-12B-Mag-Mell-R1
|
| 213 |
+
---
|
| 214 |
+
|
| 215 |
+
# Model Card for Model ID
|
| 216 |
+
|
| 217 |
+
This model was fine-tuned on the book ""120 Days of Sodom"". Need to test!
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
## Model Details
|
| 221 |
+
|
| 222 |
+
The model was fine-tuned on the raw text of a book translated by an open source project.
|
| 223 |
+
|
| 224 |
+
- **Dataset for fine-tuning:** [Internet Archive](https://archive.org/details/the120daysofsodom)","{""id"": ""SubMaroon/MN-12B-Mag-Mell-R1-SODOM-v1"", ""author"": ""SubMaroon"", ""sha"": ""8826a3bfe9f1b3b9fa9c053a70180ab2735331cf"", ""last_modified"": ""2025-02-04 17:36:37+00:00"", ""created_at"": ""2025-02-02 09:45:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""not-for-all-audiences"", ""conversational"", ""base_model:inflatebot/MN-12B-Mag-Mell-R1"", ""base_model:finetune:inflatebot/MN-12B-Mag-Mell-R1"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- inflatebot/MN-12B-Mag-Mell-R1\nlibrary_name: transformers\ntags:\n- not-for-all-audiences"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|im_end|>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": 
""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 12247782400}, ""total"": 12247782400}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-04 17:36:37+00:00"", ""cardData"": ""base_model:\n- inflatebot/MN-12B-Mag-Mell-R1\nlibrary_name: transformers\ntags:\n- not-for-all-audiences"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""679f3ec43ea3e21df639a6da"", ""modelId"": ""SubMaroon/MN-12B-Mag-Mell-R1-SODOM-v1"", ""usedStorage"": 119810671840}",1,,0,,0,"https://huggingface.co/mradermacher/MN-12B-Mag-Mell-R1-SODOM-v1-GGUF, 
https://huggingface.co/mradermacher/MN-12B-Mag-Mell-R1-SODOM-v1-i1-GGUF",2,https://huggingface.co/mergekit-community/mergekit-slerp-cxqqrgr,1,,0
|
MaskGCT_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
amphion/MaskGCT,"---
|
| 3 |
+
license: cc-by-nc-4.0
|
| 4 |
+
datasets:
|
| 5 |
+
- amphion/Emilia-Dataset
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
- zh
|
| 9 |
+
- ko
|
| 10 |
+
- ja
|
| 11 |
+
- fr
|
| 12 |
+
- de
|
| 13 |
+
base_model:
|
| 14 |
+
- amphion/MaskGCT
|
| 15 |
+
pipeline_tag: text-to-speech
|
| 16 |
+
---
|
| 17 |
+
## MaskGCT: Zero-Shot Text-to-Speech with Masked Generative Codec Transformer
|
| 18 |
+
|
| 19 |
+
[](https://arxiv.org/abs/2409.00750) [](https://huggingface.co/amphion/maskgct) [](https://huggingface.co/spaces/amphion/maskgct) [](https://github.com/open-mmlab/Amphion/tree/main/models/tts/maskgct)
|
| 20 |
+
|
| 21 |
+
## Quickstart
|
| 22 |
+
|
| 23 |
+
**Clone and install**
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
git clone https://github.com/open-mmlab/Amphion.git
|
| 27 |
+
# create env
|
| 28 |
+
bash ./models/tts/maskgct/env.sh
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
**Model download**
|
| 32 |
+
|
| 33 |
+
We provide the following pretrained checkpoints:
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
| Model Name | Description |
|
| 37 |
+
|-------------------|-------------|
|
| 38 |
+
| [Semantic Codec](https://huggingface.co/amphion/MaskGCT/tree/main/semantic_codec) | Converting speech to semantic tokens. |
|
| 39 |
+
| [Acoustic Codec](https://huggingface.co/amphion/MaskGCT/tree/main/acoustic_codec) | Converting speech to acoustic tokens and reconstructing waveform from acoustic tokens. |
|
| 40 |
+
| [MaskGCT-T2S](https://huggingface.co/amphion/MaskGCT/tree/main/t2s_model) | Predicting semantic tokens with text and prompt semantic tokens. |
|
| 41 |
+
| [MaskGCT-S2A](https://huggingface.co/amphion/MaskGCT/tree/main/s2a_model) | Predicts acoustic tokens conditioned on semantic tokens. |
|
| 42 |
+
|
| 43 |
+
You can download all pretrained checkpoints from [HuggingFace](https://huggingface.co/amphion/MaskGCT/tree/main) or use huggingface api.
|
| 44 |
+
|
| 45 |
+
```python
|
| 46 |
+
from huggingface_hub import hf_hub_download
|
| 47 |
+
|
| 48 |
+
# download semantic codec ckpt
|
| 49 |
+
semantic_code_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""semantic_codec/model.safetensors"")
|
| 50 |
+
|
| 51 |
+
# download acoustic codec ckpt
|
| 52 |
+
codec_encoder_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""acoustic_codec/model.safetensors"")
|
| 53 |
+
codec_decoder_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""acoustic_codec/model_1.safetensors"")
|
| 54 |
+
|
| 55 |
+
# download t2s model ckpt
|
| 56 |
+
t2s_model_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""t2s_model/model.safetensors"")
|
| 57 |
+
|
| 58 |
+
# download s2a model ckpt
|
| 59 |
+
s2a_1layer_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""s2a_model/s2a_model_1layer/model.safetensors"")
|
| 60 |
+
s2a_full_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""s2a_model/s2a_model_full/model.safetensors"")
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
**Basic Usage**
|
| 64 |
+
|
| 65 |
+
You can use the following code to generate speech from text and a prompt speech.
|
| 66 |
+
```python
|
| 67 |
+
from models.tts.maskgct.maskgct_utils import *
|
| 68 |
+
from huggingface_hub import hf_hub_download
|
| 69 |
+
import safetensors
|
| 70 |
+
import soundfile as sf
|
| 71 |
+
|
| 72 |
+
if __name__ == ""__main__"":
|
| 73 |
+
|
| 74 |
+
# build model
|
| 75 |
+
device = torch.device(""cuda:0"")
|
| 76 |
+
cfg_path = ""./models/tts/maskgct/config/maskgct.json""
|
| 77 |
+
cfg = load_config(cfg_path)
|
| 78 |
+
# 1. build semantic model (w2v-bert-2.0)
|
| 79 |
+
semantic_model, semantic_mean, semantic_std = build_semantic_model(device)
|
| 80 |
+
# 2. build semantic codec
|
| 81 |
+
semantic_codec = build_semantic_codec(cfg.model.semantic_codec, device)
|
| 82 |
+
# 3. build acoustic codec
|
| 83 |
+
codec_encoder, codec_decoder = build_acoustic_codec(cfg.model.acoustic_codec, device)
|
| 84 |
+
# 4. build t2s model
|
| 85 |
+
t2s_model = build_t2s_model(cfg.model.t2s_model, device)
|
| 86 |
+
# 5. build s2a model
|
| 87 |
+
s2a_model_1layer = build_s2a_model(cfg.model.s2a_model.s2a_1layer, device)
|
| 88 |
+
s2a_model_full = build_s2a_model(cfg.model.s2a_model.s2a_full, device)
|
| 89 |
+
|
| 90 |
+
# download checkpoint
|
| 91 |
+
...
|
| 92 |
+
|
| 93 |
+
# load semantic codec
|
| 94 |
+
safetensors.torch.load_model(semantic_codec, semantic_code_ckpt)
|
| 95 |
+
# load acoustic codec
|
| 96 |
+
safetensors.torch.load_model(codec_encoder, codec_encoder_ckpt)
|
| 97 |
+
safetensors.torch.load_model(codec_decoder, codec_decoder_ckpt)
|
| 98 |
+
# load t2s model
|
| 99 |
+
safetensors.torch.load_model(t2s_model, t2s_model_ckpt)
|
| 100 |
+
# load s2a model
|
| 101 |
+
safetensors.torch.load_model(s2a_model_1layer, s2a_1layer_ckpt)
|
| 102 |
+
safetensors.torch.load_model(s2a_model_full, s2a_full_ckpt)
|
| 103 |
+
|
| 104 |
+
# inference
|
| 105 |
+
prompt_wav_path = ""./models/tts/maskgct/wav/prompt.wav""
|
| 106 |
+
save_path = ""[YOUR SAVE PATH]""
|
| 107 |
+
prompt_text = "" We do not break. We never give in. We never back down.""
|
| 108 |
+
target_text = ""In this paper, we introduce MaskGCT, a fully non-autoregressive TTS model that eliminates the need for explicit alignment information between text and speech supervision.""
|
| 109 |
+
# Specify the target duration (in seconds). If target_len = None, we use a simple rule to predict the target duration.
|
| 110 |
+
target_len = 18
|
| 111 |
+
|
| 112 |
+
maskgct_inference_pipeline = MaskGCT_Inference_Pipeline(
|
| 113 |
+
semantic_model,
|
| 114 |
+
semantic_codec,
|
| 115 |
+
codec_encoder,
|
| 116 |
+
codec_decoder,
|
| 117 |
+
t2s_model,
|
| 118 |
+
s2a_model_1layer,
|
| 119 |
+
s2a_model_full,
|
| 120 |
+
semantic_mean,
|
| 121 |
+
semantic_std,
|
| 122 |
+
device,
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
recovered_audio = maskgct_inference_pipeline.maskgct_inference(
|
| 126 |
+
prompt_wav_path, prompt_text, target_text, ""en"", ""en"", target_len=target_len
|
| 127 |
+
)
|
| 128 |
+
sf.write(save_path, recovered_audio, 24000)
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
**Training Dataset**
|
| 132 |
+
|
| 133 |
+
We use the [Emilia](https://huggingface.co/datasets/amphion/Emilia-Dataset) dataset to train our models. Emilia is a multilingual and diverse in-the-wild speech dataset designed for large-scale speech generation. In this work, we use English and Chinese data from Emilia, each with 50K hours of speech (totaling 100K hours).
|
| 134 |
+
|
| 135 |
+
**Citation**
|
| 136 |
+
|
| 137 |
+
If you use MaskGCT in your research, please cite the following paper:
|
| 138 |
+
```bibtex
|
| 139 |
+
@article{wang2024maskgct,
|
| 140 |
+
title={MaskGCT: Zero-Shot Text-to-Speech with Masked Generative Codec Transformer},
|
| 141 |
+
author={Wang, Yuancheng and Zhan, Haoyue and Liu, Liwei and Zeng, Ruihong and Guo, Haotian and Zheng, Jiachen and Zhang, Qiang and Zhang, Xueyao and Zhang, Shunsi and Wu, Zhizheng},
|
| 142 |
+
journal={arXiv preprint arXiv:2409.00750},
|
| 143 |
+
year={2024}
|
| 144 |
+
}
|
| 145 |
+
@inproceedings{amphion,
|
| 146 |
+
author={Zhang, Xueyao and Xue, Liumeng and Gu, Yicheng and Wang, Yuancheng and Li, Jiaqi and He, Haorui and Wang, Chaoren and Song, Ting and Chen, Xi and Fang, Zihao and Chen, Haopeng and Zhang, Junan and Tang, Tze Ying and Zou, Lexiao and Wang, Mingxuan and Han, Jun and Chen, Kai and Li, Haizhou and Wu, Zhizheng},
|
| 147 |
+
title={Amphion: An Open-Source Audio, Music and Speech Generation Toolkit},
|
| 148 |
+
booktitle={{IEEE} Spoken Language Technology Workshop, {SLT} 2024},
|
| 149 |
+
year={2024}
|
| 150 |
+
}
|
| 151 |
+
```","{""id"": ""amphion/MaskGCT"", ""author"": ""amphion"", ""sha"": ""265c6cef07625665d0c28d2faafb1415562379dc"", ""last_modified"": ""2025-04-13 06:09:22+00:00"", ""created_at"": ""2024-10-13 16:42:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 77, ""downloads_all_time"": null, ""likes"": 286, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""text-to-speech"", ""en"", ""zh"", ""ko"", ""ja"", ""fr"", ""de"", ""dataset:amphion/Emilia-Dataset"", ""arxiv:2409.00750"", ""base_model:amphion/MaskGCT"", ""base_model:finetune:amphion/MaskGCT"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- amphion/MaskGCT\ndatasets:\n- amphion/Emilia-Dataset\nlanguage:\n- en\n- zh\n- ko\n- ja\n- fr\n- de\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='acoustic_codec/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='acoustic_codec/model_1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='s2a_model/s2a_model_1layer/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='s2a_model/s2a_model_full/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='semantic_codec/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2s_model/model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Pendrokar/TTS-Spaces-Arena"", ""amphion/maskgct"", 
""fffiloni/tts-hallo-talking-portrait"", ""fffiloni/EchoMimic"", ""cocktailpeanut/maskgct"", ""Svngoku/maskgct-audio-lab"", ""ordinaryaccount/VoiceMark"", ""hasan93/hallo-talking-face"", ""raoyonghui/maskgct"", ""zjc1617018/maskgct"", ""mantrakp/maskgct"", ""lijiacai/maskgct"", ""RyanCc/tts-hallo-talking-portrait"", ""Nymbo/EchoMimic"", ""shelbao/maskgct"", ""ishandutta2007/tts-hallo-talking-portrait"", ""slhlal/maskgct"", ""Kotrapulokalnego/tts-hallo-talking-portrait"", ""Hyathi/maskgct"", ""waloneai/tts-hallo-talking-portrait"", ""waloneai/Walonetts-hallo-talking-portrait"", ""prakssss/tts-hallo-talking-portrait"", ""halobang/maskgct""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-13 06:09:22+00:00"", ""cardData"": ""base_model:\n- amphion/MaskGCT\ndatasets:\n- amphion/Emilia-Dataset\nlanguage:\n- en\n- zh\n- ko\n- ja\n- fr\n- de\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech"", ""transformersInfo"": null, ""_id"": ""670bf85f348a47072ed37065"", ""modelId"": ""amphion/MaskGCT"", ""usedStorage"": 7326755616}",0,"https://huggingface.co/amphion/MaskGCT, https://huggingface.co/lizhen95366/MSAK",2,,0,,0,,0,"Pendrokar/TTS-Spaces-Arena, RyanCc/tts-hallo-talking-portrait, Svngoku/maskgct-audio-lab, amphion/maskgct, cocktailpeanut/maskgct, fffiloni/EchoMimic, fffiloni/tts-hallo-talking-portrait, hasan93/hallo-talking-face, huggingface/InferenceSupport/discussions/new?title=amphion/MaskGCT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bamphion%2FMaskGCT%5D(%2Famphion%2FMaskGCT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ordinaryaccount/VoiceMark, prakssss/tts-hallo-talking-portrait, raoyonghui/maskgct, zjc1617018/maskgct",13
|
| 152 |
+
lizhen95366/MSAK,"---
|
| 153 |
+
license: cc-by-nc-4.0
|
| 154 |
+
datasets:
|
| 155 |
+
- amphion/Emilia-Dataset
|
| 156 |
+
language:
|
| 157 |
+
- en
|
| 158 |
+
- zh
|
| 159 |
+
- ko
|
| 160 |
+
- ja
|
| 161 |
+
- fr
|
| 162 |
+
- de
|
| 163 |
+
base_model:
|
| 164 |
+
- amphion/MaskGCT
|
| 165 |
+
pipeline_tag: text-to-speech
|
| 166 |
+
---
|
| 167 |
+
## MaskGCT: Zero-Shot Text-to-Speech with Masked Generative Codec Transformer
|
| 168 |
+
|
| 169 |
+
[](https://arxiv.org/abs/2409.00750) [](https://huggingface.co/amphion/maskgct) [](https://huggingface.co/spaces/amphion/maskgct) [](https://github.com/open-mmlab/Amphion/tree/main/models/tts/maskgct)
|
| 170 |
+
|
| 171 |
+
## Quickstart
|
| 172 |
+
|
| 173 |
+
**Clone and install**
|
| 174 |
+
|
| 175 |
+
```bash
|
| 176 |
+
git clone https://github.com/open-mmlab/Amphion.git
|
| 177 |
+
# create env
|
| 178 |
+
bash ./models/tts/maskgct/env.sh
|
| 179 |
+
```
|
| 180 |
+
|
| 181 |
+
**Model download**
|
| 182 |
+
|
| 183 |
+
We provide the following pretrained checkpoints:
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
| Model Name | Description |
|
| 187 |
+
|-------------------|-------------|
|
| 188 |
+
| [Semantic Codec](https://huggingface.co/amphion/MaskGCT/tree/main/semantic_codec) | Converting speech to semantic tokens. |
|
| 189 |
+
| [Acoustic Codec](https://huggingface.co/amphion/MaskGCT/tree/main/acoustic_codec) | Converting speech to acoustic tokens and reconstructing waveform from acoustic tokens. |
|
| 190 |
+
| [MaskGCT-T2S](https://huggingface.co/amphion/MaskGCT/tree/main/t2s_model) | Predicting semantic tokens with text and prompt semantic tokens. |
|
| 191 |
+
| [MaskGCT-S2A](https://huggingface.co/amphion/MaskGCT/tree/main/s2a_model) | Predicts acoustic tokens conditioned on semantic tokens. |
|
| 192 |
+
|
| 193 |
+
You can download all pretrained checkpoints from [HuggingFace](https://huggingface.co/amphion/MaskGCT/tree/main) or use huggingface api.
|
| 194 |
+
|
| 195 |
+
```python
|
| 196 |
+
from huggingface_hub import hf_hub_download
|
| 197 |
+
|
| 198 |
+
# download semantic codec ckpt
|
| 199 |
+
semantic_code_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""semantic_codec/model.safetensors"")
|
| 200 |
+
|
| 201 |
+
# download acoustic codec ckpt
|
| 202 |
+
codec_encoder_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""acoustic_codec/model.safetensors"")
|
| 203 |
+
codec_decoder_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""acoustic_codec/model_1.safetensors"")
|
| 204 |
+
|
| 205 |
+
# download t2s model ckpt
|
| 206 |
+
t2s_model_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""t2s_model/model.safetensors"")
|
| 207 |
+
|
| 208 |
+
# download s2a model ckpt
|
| 209 |
+
s2a_1layer_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""s2a_model/s2a_model_1layer/model.safetensors"")
|
| 210 |
+
s2a_full_ckpt = hf_hub_download(""amphion/MaskGCT"", filename=""s2a_model/s2a_model_full/model.safetensors"")
|
| 211 |
+
```
|
| 212 |
+
|
| 213 |
+
**Basic Usage**
|
| 214 |
+
|
| 215 |
+
You can use the following code to generate speech from text and a prompt speech.
|
| 216 |
+
```python
|
| 217 |
+
from models.tts.maskgct.maskgct_utils import *
|
| 218 |
+
from huggingface_hub import hf_hub_download
|
| 219 |
+
import safetensors
|
| 220 |
+
import soundfile as sf
|
| 221 |
+
|
| 222 |
+
if __name__ == ""__main__"":
|
| 223 |
+
|
| 224 |
+
# build model
|
| 225 |
+
device = torch.device(""cuda:0"")
|
| 226 |
+
cfg_path = ""./models/tts/maskgct/config/maskgct.json""
|
| 227 |
+
cfg = load_config(cfg_path)
|
| 228 |
+
# 1. build semantic model (w2v-bert-2.0)
|
| 229 |
+
semantic_model, semantic_mean, semantic_std = build_semantic_model(device)
|
| 230 |
+
# 2. build semantic codec
|
| 231 |
+
semantic_codec = build_semantic_codec(cfg.model.semantic_codec, device)
|
| 232 |
+
# 3. build acoustic codec
|
| 233 |
+
codec_encoder, codec_decoder = build_acoustic_codec(cfg.model.acoustic_codec, device)
|
| 234 |
+
# 4. build t2s model
|
| 235 |
+
t2s_model = build_t2s_model(cfg.model.t2s_model, device)
|
| 236 |
+
# 5. build s2a model
|
| 237 |
+
s2a_model_1layer = build_s2a_model(cfg.model.s2a_model.s2a_1layer, device)
|
| 238 |
+
s2a_model_full = build_s2a_model(cfg.model.s2a_model.s2a_full, device)
|
| 239 |
+
|
| 240 |
+
# download checkpoint
|
| 241 |
+
...
|
| 242 |
+
|
| 243 |
+
# load semantic codec
|
| 244 |
+
safetensors.torch.load_model(semantic_codec, semantic_code_ckpt)
|
| 245 |
+
# load acoustic codec
|
| 246 |
+
safetensors.torch.load_model(codec_encoder, codec_encoder_ckpt)
|
| 247 |
+
safetensors.torch.load_model(codec_decoder, codec_decoder_ckpt)
|
| 248 |
+
# load t2s model
|
| 249 |
+
safetensors.torch.load_model(t2s_model, t2s_model_ckpt)
|
| 250 |
+
# load s2a model
|
| 251 |
+
safetensors.torch.load_model(s2a_model_1layer, s2a_1layer_ckpt)
|
| 252 |
+
safetensors.torch.load_model(s2a_model_full, s2a_full_ckpt)
|
| 253 |
+
|
| 254 |
+
# inference
|
| 255 |
+
prompt_wav_path = ""./models/tts/maskgct/wav/prompt.wav""
|
| 256 |
+
save_path = ""[YOUR SAVE PATH]""
|
| 257 |
+
prompt_text = "" We do not break. We never give in. We never back down.""
|
| 258 |
+
target_text = ""In this paper, we introduce MaskGCT, a fully non-autoregressive TTS model that eliminates the need for explicit alignment information between text and speech supervision.""
|
| 259 |
+
# Specify the target duration (in seconds). If target_len = None, we use a simple rule to predict the target duration.
|
| 260 |
+
target_len = 18
|
| 261 |
+
|
| 262 |
+
maskgct_inference_pipeline = MaskGCT_Inference_Pipeline(
|
| 263 |
+
semantic_model,
|
| 264 |
+
semantic_codec,
|
| 265 |
+
codec_encoder,
|
| 266 |
+
codec_decoder,
|
| 267 |
+
t2s_model,
|
| 268 |
+
s2a_model_1layer,
|
| 269 |
+
s2a_model_full,
|
| 270 |
+
semantic_mean,
|
| 271 |
+
semantic_std,
|
| 272 |
+
device,
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
recovered_audio = maskgct_inference_pipeline.maskgct_inference(
|
| 276 |
+
prompt_wav_path, prompt_text, target_text, ""en"", ""en"", target_len=target_len
|
| 277 |
+
)
|
| 278 |
+
sf.write(save_path, recovered_audio, 24000)
|
| 279 |
+
```
|
| 280 |
+
|
| 281 |
+
**Training Dataset**
|
| 282 |
+
|
| 283 |
+
We use the [Emilia](https://huggingface.co/datasets/amphion/Emilia-Dataset) dataset to train our models. Emilia is a multilingual and diverse in-the-wild speech dataset designed for large-scale speech generation. In this work, we use English and Chinese data from Emilia, each with 50K hours of speech (totaling 100K hours).
|
| 284 |
+
|
| 285 |
+
**Citation**
|
| 286 |
+
|
| 287 |
+
If you use MaskGCT in your research, please cite the following paper:
|
| 288 |
+
```bibtex
|
| 289 |
+
@article{wang2024maskgct,
|
| 290 |
+
title={MaskGCT: Zero-Shot Text-to-Speech with Masked Generative Codec Transformer},
|
| 291 |
+
author={Wang, Yuancheng and Zhan, Haoyue and Liu, Liwei and Zeng, Ruihong and Guo, Haotian and Zheng, Jiachen and Zhang, Qiang and Zhang, Xueyao and Zhang, Shunsi and Wu, Zhizheng},
|
| 292 |
+
journal={arXiv preprint arXiv:2409.00750},
|
| 293 |
+
year={2024}
|
| 294 |
+
}
|
| 295 |
+
@inproceedings{amphion,
|
| 296 |
+
author={Zhang, Xueyao and Xue, Liumeng and Gu, Yicheng and Wang, Yuancheng and Li, Jiaqi and He, Haorui and Wang, Chaoren and Song, Ting and Chen, Xi and Fang, Zihao and Chen, Haopeng and Zhang, Junan and Tang, Tze Ying and Zou, Lexiao and Wang, Mingxuan and Han, Jun and Chen, Kai and Li, Haizhou and Wu, Zhizheng},
|
| 297 |
+
title={Amphion: An Open-Source Audio, Music and Speech Generation Toolkit},
|
| 298 |
+
booktitle={{IEEE} Spoken Language Technology Workshop, {SLT} 2024},
|
| 299 |
+
year={2024}
|
| 300 |
+
}
|
| 301 |
+
```","{""id"": ""lizhen95366/MSAK"", ""author"": ""lizhen95366"", ""sha"": ""3098898a5f7757024ff8b54faaa84ac0dea22856"", ""last_modified"": ""2024-10-29 02:37:12+00:00"", ""created_at"": ""2024-10-29 02:29:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-to-speech"", ""en"", ""zh"", ""ko"", ""ja"", ""fr"", ""de"", ""dataset:amphion/Emilia-Dataset"", ""arxiv:2409.00750"", ""base_model:amphion/MaskGCT"", ""base_model:finetune:amphion/MaskGCT"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- amphion/MaskGCT\ndatasets:\n- amphion/Emilia-Dataset\nlanguage:\n- en\n- zh\n- ko\n- ja\n- fr\n- de\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-29 02:37:12+00:00"", ""cardData"": ""base_model:\n- amphion/MaskGCT\ndatasets:\n- amphion/Emilia-Dataset\nlanguage:\n- en\n- zh\n- ko\n- ja\n- fr\n- de\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech"", ""transformersInfo"": null, ""_id"": ""6720489893f3d8192f102a21"", ""modelId"": ""lizhen95366/MSAK"", ""usedStorage"": 0}",1,,0,,0,,0,,0,"amphion/maskgct, 
huggingface/InferenceSupport/discussions/new?title=lizhen95366/MSAK&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blizhen95366%2FMSAK%5D(%2Flizhen95366%2FMSAK)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
Meta-Llama-3-8B_finetunes_20250422_180448.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Midjourney_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Jovie/Midjourney,"---
|
| 3 |
+
tags:
|
| 4 |
+
- flux
|
| 5 |
+
- text-to-image
|
| 6 |
+
- lora
|
| 7 |
+
- diffusers
|
| 8 |
+
- fal
|
| 9 |
+
base_model: black-forest-labs/FLUX.1-dev
|
| 10 |
+
instance_prompt: null
|
| 11 |
+
license: other
|
| 12 |
+
license_name: flux-1-dev-non-commercial-license
|
| 13 |
+
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
|
| 14 |
+
widget:
|
| 15 |
+
- text: >-
|
| 16 |
+
A cute blonde woman in bikini and her doge are sitting on a couch cuddling
|
| 17 |
+
and the expressive, stylish living room scene with a playful twist. The room
|
| 18 |
+
is painted in a soothing turquoise color scheme, stylish living room scene
|
| 19 |
+
bathed in a cool, textured turquoise blanket and adorned with several
|
| 20 |
+
matching turquoise throw pillows. The room's color scheme is predominantly
|
| 21 |
+
turquoise, relaxed demeanor. The couch is covered in a soft, reflecting
|
| 22 |
+
light and adding to the vibrant blue hue., dark room with a sleek, spherical
|
| 23 |
+
gold decorations, This photograph captures a scene that is whimsically
|
| 24 |
+
styled in a vibrant, reflective cyan sunglasses. The dog's expression is
|
| 25 |
+
cheerful, metallic fabric sofa. The dog, soothing atmosphere.
|
| 26 |
+
output:
|
| 27 |
+
url: images/example_wilzbmf24.png
|
| 28 |
+
- text: >-
|
| 29 |
+
Luxe Enchantment: “A young woman in her 20s is captured in a dreamlike,
|
| 30 |
+
cinematic portrait, dressed in extravagant, layered fashion with rich,
|
| 31 |
+
jewel-toned fabrics and ornate embellishments. The high-contrast lighting
|
| 32 |
+
creates depth, casting dramatic shadows that highlight her smooth skin and
|
| 33 |
+
exquisite facial features. The shot exudes both sophistication and
|
| 34 |
+
sensuality, with professional retouching that enhances every intricate
|
| 35 |
+
detail of her attire. Her presence is magnetic, commanding attention through
|
| 36 |
+
the lush, opulent visuals that define her high-fashion look.”
|
| 37 |
+
output:
|
| 38 |
+
url: images/example_wbu24jq33.png
|
| 39 |
+
- text: ""In a surreal and cosmic confrontation, a figure gazes upward toward an imposing, liquid-like entity that seems to emerge from the darkness itself. The composition is stark, with the human figure positioned at the bottom right, dwarfed by the overwhelming presence of the otherworldly being that dominates the upper left. The background is a pure, endless black, intensifying the isolation and drama of the scene. The human figure, seemingly feminine, is rendered in soft, warm tones, their skin catching faint highlights. They wear a loose, slightly wrinkled white shirt, its fabric textured and realistic, subtly illuminated as if from a dim, unseen light source. Their head is tilted back, exposing their neck in a vulnerable pose, while their curly black hair cascades downward, adding dynamic texture. A trickle of bright red blood runs from their eyes, streaking down their face, evoking both pain and transcendence. Their expression is one of awe or terror, mouth slightly open, as if caught mid-breath or mid-scream. Their outstretched hand is delicate, fingers frozen mid-motion, as though reaching toward or recoiling from the massive entity above. The dominating presence of the entity above is an abstract, amorphous mass, its form resembling molten metal or liquid mercury. The surface of the creature is highly reflective, capturing faint rainbows of refracted light that streak across its silvery-black exterior, breaking the monochrome void with eerie vibrancy. Its face emerges from the swirling mass, grotesque and menacing, with exaggerated features: deep-set eyes, a snarling mouth full of jagged teeth, and sharp, angular contours that suggest both predatory intent and alien beauty. The creature appears both fluid and solid, with parts of its body stretching and twisting as if in motion, extending its influence over the scene. 
The interplay of textures is strikingâ\x80\x94the sharp, metallic sheen of the creatureâ\x80\x99s form contrasts heavily with the softness of the human figureâ\x80\x99s hair, skin, and clothing. The lighting emphasizes this contrast, with the creature glowing faintly from within, its surface rippling with unnatural energy, while the figure below is lit more subtly, grounding them in human vulnerability. The absence of a defined environment enhances the existential tension of the image, as if this encounter is taking place in a void or beyond reality itself. Themes of submission, fear, and cosmic insignificance permeate the composition, with the creatureâ\x80\x99s overwhelming scale and surreal presence dwarfing the human figure, evoking feelings of awe and helplessness. The aesthetic combines elements of cosmic horror, surrealism, and anime-inspired drama, creating a moment that feels frozen in timeâ\x80\x94both deeply personal and universally vast. The image captures the essence of a climactic confrontation, leaving the viewer to question whether this is a moment of transcendence, destruction, or something far beyond comprehension. anime, cyberpunk style""
|
| 40 |
+
output:
|
| 41 |
+
url: images/example_ubv32i84d.png
|
| 42 |
+
|
| 43 |
+
---
|
| 44 |
+
# mj model style
|
| 45 |
+
|
| 46 |
+
<Gallery />
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
## Model description
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
## Trigger words
|
| 55 |
+
|
| 56 |
+
You should use `` to trigger the image generation.
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
## Download model
|
| 60 |
+
|
| 61 |
+
Weights for this model are available in Safetensors format.
|
| 62 |
+
|
| 63 |
+
[Download](/Jovie/Midjourney/tree/main) them in the Files & versions tab.","{""id"": ""Jovie/Midjourney"", ""author"": ""Jovie"", ""sha"": ""f231dd808a1112434d465bf24c8d05d5523f89d4"", ""last_modified"": ""2025-01-07 13:04:42+00:00"", ""created_at"": ""2024-09-22 23:32:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 16628, ""downloads_all_time"": null, ""likes"": 257, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""flux"", ""text-to-image"", ""lora"", ""fal"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:adapter:black-forest-labs/FLUX.1-dev"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- flux\n- text-to-image\n- lora\n- diffusers\n- fal\nwidget:\n- text: A cute blonde woman in bikini and her doge are sitting on a couch cuddling\n and the expressive, stylish living room scene with a playful twist. The room is\n painted in a soothing turquoise color scheme, stylish living room scene bathed\n in a cool, textured turquoise blanket and adorned with several matching turquoise\n throw pillows. The room's color scheme is predominantly turquoise, relaxed demeanor.\n The couch is covered in a soft, reflecting light and adding to the vibrant blue\n hue., dark room with a sleek, spherical gold decorations, This photograph captures\n a scene that is whimsically styled in a vibrant, reflective cyan sunglasses. The\n dog's expression is cheerful, metallic fabric sofa. 
The dog, soothing atmosphere.\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wilzbmf24.png\n- text: 'Luxe Enchantment: \u201cA young woman in her 20s is captured in a dreamlike, cinematic\n portrait, dressed in extravagant, layered fashion with rich, jewel-toned fabrics\n and ornate embellishments. The high-contrast lighting creates depth, casting dramatic\n shadows that highlight her smooth skin and exquisite facial features. The shot\n exudes both sophistication and sensuality, with professional retouching that enhances\n every intricate detail of her attire. Her presence is magnetic, commanding attention\n through the lush, opulent visuals that define her high-fashion look.\u201d'\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wbu24jq33.png\n- text: \""In a surreal and cosmic confrontation, a figure gazes upward toward an imposing,\\\n \\ liquid-like entity that seems to emerge from the darkness itself. The composition\\\n \\ is stark, with the human figure positioned at the bottom right, dwarfed by the\\\n \\ overwhelming presence of the otherworldly being that dominates the upper left.\\\n \\ The background is a pure, endless black, intensifying the isolation and drama\\\n \\ of the scene. The human figure, seemingly feminine, is rendered in soft, warm\\\n \\ tones, their skin catching faint highlights. They wear a loose, slightly wrinkled\\\n \\ white shirt, its fabric textured and realistic, subtly illuminated as if from\\\n \\ a dim, unseen light source. Their head is tilted back, exposing their neck in\\\n \\ a vulnerable pose, while their curly black hair cascades downward, adding dynamic\\\n \\ texture. A trickle of bright red blood runs from their eyes, streaking down\\\n \\ their face, evoking both pain and transcendence. Their expression is one of\\\n \\ awe or terror, mouth slightly open, as if caught mid-breath or mid-scream. 
Their\\\n \\ outstretched hand is delicate, fingers frozen mid-motion, as though reaching\\\n \\ toward or recoiling from the massive entity above. The dominating presence of\\\n \\ the entity above is an abstract, amorphous mass, its form resembling molten\\\n \\ metal or liquid mercury. The surface of the creature is highly reflective, capturing\\\n \\ faint rainbows of refracted light that streak across its silvery-black exterior,\\\n \\ breaking the monochrome void with eerie vibrancy. Its face emerges from the\\\n \\ swirling mass, grotesque and menacing, with exaggerated features: deep-set eyes,\\\n \\ a snarling mouth full of jagged teeth, and sharp, angular contours that suggest\\\n \\ both predatory intent and alien beauty. The creature appears both fluid and\\\n \\ solid, with parts of its body stretching and twisting as if in motion, extending\\\n \\ its influence over the scene. The interplay of textures is striking\u00e2\\x80\\x94\\\n the sharp, metallic sheen of the creature\u00e2\\x80\\x99s form contrasts heavily with\\\n \\ the softness of the human figure\u00e2\\x80\\x99s hair, skin, and clothing. The lighting\\\n \\ emphasizes this contrast, with the creature glowing faintly from within, its\\\n \\ surface rippling with unnatural energy, while the figure below is lit more subtly,\\\n \\ grounding them in human vulnerability. The absence of a defined environment\\\n \\ enhances the existential tension of the image, as if this encounter is taking\\\n \\ place in a void or beyond reality itself. Themes of submission, fear, and cosmic\\\n \\ insignificance permeate the composition, with the creature\u00e2\\x80\\x99s overwhelming\\\n \\ scale and surreal presence dwarfing the human figure, evoking feelings of awe\\\n \\ and helplessness. The aesthetic combines elements of cosmic horror, surrealism,\\\n \\ and anime-inspired drama, creating a moment that feels frozen in time\u00e2\\x80\\x94\\\n both deeply personal and universally vast. 
The image captures the essence of a\\\n \\ climactic confrontation, leaving the viewer to question whether this is a moment\\\n \\ of transcendence, destruction, or something far beyond comprehension. anime,\\\n \\ cyberpunk style\""\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_ubv32i84d.png"", ""widget_data"": [{""text"": ""A cute blonde woman in bikini and her doge are sitting on a couch cuddling and the expressive, stylish living room scene with a playful twist. The room is painted in a soothing turquoise color scheme, stylish living room scene bathed in a cool, textured turquoise blanket and adorned with several matching turquoise throw pillows. The room's color scheme is predominantly turquoise, relaxed demeanor. The couch is covered in a soft, reflecting light and adding to the vibrant blue hue., dark room with a sleek, spherical gold decorations, This photograph captures a scene that is whimsically styled in a vibrant, reflective cyan sunglasses. The dog's expression is cheerful, metallic fabric sofa. The dog, soothing atmosphere."", ""output"": {""url"": ""https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wilzbmf24.png""}}, {""text"": ""Luxe Enchantment: \u201cA young woman in her 20s is captured in a dreamlike, cinematic portrait, dressed in extravagant, layered fashion with rich, jewel-toned fabrics and ornate embellishments. The high-contrast lighting creates depth, casting dramatic shadows that highlight her smooth skin and exquisite facial features. The shot exudes both sophistication and sensuality, with professional retouching that enhances every intricate detail of her attire. 
Her presence is magnetic, commanding attention through the lush, opulent visuals that define her high-fashion look.\u201d"", ""output"": {""url"": ""https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wbu24jq33.png""}}, {""text"": ""In a surreal and cosmic confrontation, a figure gazes upward toward an imposing, liquid-like entity that seems to emerge from the darkness itself. The composition is stark, with the human figure positioned at the bottom right, dwarfed by the overwhelming presence of the otherworldly being that dominates the upper left. The background is a pure, endless black, intensifying the isolation and drama of the scene. The human figure, seemingly feminine, is rendered in soft, warm tones, their skin catching faint highlights. They wear a loose, slightly wrinkled white shirt, its fabric textured and realistic, subtly illuminated as if from a dim, unseen light source. Their head is tilted back, exposing their neck in a vulnerable pose, while their curly black hair cascades downward, adding dynamic texture. A trickle of bright red blood runs from their eyes, streaking down their face, evoking both pain and transcendence. Their expression is one of awe or terror, mouth slightly open, as if caught mid-breath or mid-scream. Their outstretched hand is delicate, fingers frozen mid-motion, as though reaching toward or recoiling from the massive entity above. The dominating presence of the entity above is an abstract, amorphous mass, its form resembling molten metal or liquid mercury. The surface of the creature is highly reflective, capturing faint rainbows of refracted light that streak across its silvery-black exterior, breaking the monochrome void with eerie vibrancy. Its face emerges from the swirling mass, grotesque and menacing, with exaggerated features: deep-set eyes, a snarling mouth full of jagged teeth, and sharp, angular contours that suggest both predatory intent and alien beauty. 
The creature appears both fluid and solid, with parts of its body stretching and twisting as if in motion, extending its influence over the scene. The interplay of textures is striking\u00e2\u0080\u0094the sharp, metallic sheen of the creature\u00e2\u0080\u0099s form contrasts heavily with the softness of the human figure\u00e2\u0080\u0099s hair, skin, and clothing. The lighting emphasizes this contrast, with the creature glowing faintly from within, its surface rippling with unnatural energy, while the figure below is lit more subtly, grounding them in human vulnerability. The absence of a defined environment enhances the existential tension of the image, as if this encounter is taking place in a void or beyond reality itself. Themes of submission, fear, and cosmic insignificance permeate the composition, with the creature\u00e2\u0080\u0099s overwhelming scale and surreal presence dwarfing the human figure, evoking feelings of awe and helplessness. The aesthetic combines elements of cosmic horror, surrealism, and anime-inspired drama, creating a moment that feels frozen in time\u00e2\u0080\u0094both deeply personal and universally vast. The image captures the essence of a climactic confrontation, leaving the viewer to question whether this is a moment of transcendence, destruction, or something far beyond comprehension. 
anime, cyberpunk style"", ""output"": {""url"": ""https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_ubv32i84d.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/example_ubv32i84d.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/example_wbu24jq33.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/example_wilzbmf24.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mj5.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Nymbo/Serverless-ImgGen-Hub"", ""NeurixYUFI/imggen"", ""fantaxy/playground25"", ""Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""aexyb/Diffusion"", ""Deninski1/Jovie-Midjourney"", ""HuggingFaceSupport/midjourney"", ""RugsAIAlpha/Jovie-Midjourney"", ""colonsky/Jovie-Midjourney"", ""PeepDaSlan9/HYDRAS_Jovie-Midjourney"", ""NativeAngels/Serverless-ImgGen-Hub"", ""KwameLaryea/Jovie-Midjourney"", ""theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""IdenGhost/Jovie-Midjourney"", ""yufiru/ImageGeneratotModels"", ""b1ka/Jovie-Midjourney"", ""Nymbo/Model-Status-Checker"", ""Anonym26/TextToImages"", ""crazyhite001/imggen"", ""sajjaddg/Jovie-Midjourney"", ""Parmist/strangerzonehf-Flux-Super-Realism-LoRA"", ""Nymbo/serverless-imggen-test"", ""Fantomio/Jovie-Midjourney"", ""gabriel234/Jovie-Midjourney"", ""crazycarrot/Jovie-Midjourney"", ""davidAbrahan/Jovie-Midjourney"", ""hansmdll/Jovie-Midjourney"", ""spiralrewind/Jovie-Midjourney"", ""basilerror/Jovie-Midjourney"", ""Tuanpluss02/Jovie-Midjourney"", ""RAMYASRI-39/Jovie-Midjourney"", ""nikkyiy/Jovie-Midjourney"", ""DANA-Z/Jovie-Midjourney"", ""BhavyaKachhadiya/Jovie-Midjourney"", ""tamamonomae222/Jovie-Midjourney"", ""savan2001/Jovie-Midjourney"", ""freQuensy23/TextToImages"", 
""iryahayri/Jovie-Midjourney"", ""Dhw627ju/Jovie-Midjourney"", ""Lumivexity/MidjourneyByJOVIE"", ""saliseabeali/Jovie-Midjourney"", ""NativeAngels/Jovie-Midjourney"", ""yangq1q1ng/Jovie-Midjourney"", ""rikidev/Jovie-Midjourney"", ""Rajesh64240/Jovie-Midjourney"", ""0xBlxck/Jovie-Midjourney"", ""0xBlxck/Jovie-Midjourney1"", ""csalazarp73/CASPMIDJOURNEY"", ""lenruy/Jovie-Midjourney"", ""Vinarator/Jovie-Midjourney"", ""RobinD8/Jovie-Midjourney"", ""abhin2003/Jovie-Midjourney"", ""Stadereck/Jovie-Midjourney"", ""joeysaada/Jovie-Midjourney"", ""9891dev/Jovie-Midjourney"", ""martynka/for-dev"", ""dezshredder/First_agent_template"", ""rnkallday/Jovie-Midjourney"", ""kooldark/Mid_genimg"", ""erkbla/Jovie-Midjourney"", ""EliteGamerCJ/Jovie-Midjourney"", ""P08alcobia/Jovie-Midjourney"", ""yacksweb001/Jovie-Midjourney"", ""rafaelkamp/black-forest-labs-FLUX.1-dev""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-07 13:04:42+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- flux\n- text-to-image\n- lora\n- diffusers\n- fal\nwidget:\n- text: A cute blonde woman in bikini and her doge are sitting on a couch cuddling\n and the expressive, stylish living room scene with a playful twist. The room is\n painted in a soothing turquoise color scheme, stylish living room scene bathed\n in a cool, textured turquoise blanket and adorned with several matching turquoise\n throw pillows. The room's color scheme is predominantly turquoise, relaxed demeanor.\n The couch is covered in a soft, reflecting light and adding to the vibrant blue\n hue., dark room with a sleek, spherical gold decorations, This photograph captures\n a scene that is whimsically styled in a vibrant, reflective cyan sunglasses. The\n dog's expression is cheerful, metallic fabric sofa. 
The dog, soothing atmosphere.\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wilzbmf24.png\n- text: 'Luxe Enchantment: \u201cA young woman in her 20s is captured in a dreamlike, cinematic\n portrait, dressed in extravagant, layered fashion with rich, jewel-toned fabrics\n and ornate embellishments. The high-contrast lighting creates depth, casting dramatic\n shadows that highlight her smooth skin and exquisite facial features. The shot\n exudes both sophistication and sensuality, with professional retouching that enhances\n every intricate detail of her attire. Her presence is magnetic, commanding attention\n through the lush, opulent visuals that define her high-fashion look.\u201d'\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_wbu24jq33.png\n- text: \""In a surreal and cosmic confrontation, a figure gazes upward toward an imposing,\\\n \\ liquid-like entity that seems to emerge from the darkness itself. The composition\\\n \\ is stark, with the human figure positioned at the bottom right, dwarfed by the\\\n \\ overwhelming presence of the otherworldly being that dominates the upper left.\\\n \\ The background is a pure, endless black, intensifying the isolation and drama\\\n \\ of the scene. The human figure, seemingly feminine, is rendered in soft, warm\\\n \\ tones, their skin catching faint highlights. They wear a loose, slightly wrinkled\\\n \\ white shirt, its fabric textured and realistic, subtly illuminated as if from\\\n \\ a dim, unseen light source. Their head is tilted back, exposing their neck in\\\n \\ a vulnerable pose, while their curly black hair cascades downward, adding dynamic\\\n \\ texture. A trickle of bright red blood runs from their eyes, streaking down\\\n \\ their face, evoking both pain and transcendence. Their expression is one of\\\n \\ awe or terror, mouth slightly open, as if caught mid-breath or mid-scream. 
Their\\\n \\ outstretched hand is delicate, fingers frozen mid-motion, as though reaching\\\n \\ toward or recoiling from the massive entity above. The dominating presence of\\\n \\ the entity above is an abstract, amorphous mass, its form resembling molten\\\n \\ metal or liquid mercury. The surface of the creature is highly reflective, capturing\\\n \\ faint rainbows of refracted light that streak across its silvery-black exterior,\\\n \\ breaking the monochrome void with eerie vibrancy. Its face emerges from the\\\n \\ swirling mass, grotesque and menacing, with exaggerated features: deep-set eyes,\\\n \\ a snarling mouth full of jagged teeth, and sharp, angular contours that suggest\\\n \\ both predatory intent and alien beauty. The creature appears both fluid and\\\n \\ solid, with parts of its body stretching and twisting as if in motion, extending\\\n \\ its influence over the scene. The interplay of textures is striking\u00e2\\x80\\x94\\\n the sharp, metallic sheen of the creature\u00e2\\x80\\x99s form contrasts heavily with\\\n \\ the softness of the human figure\u00e2\\x80\\x99s hair, skin, and clothing. The lighting\\\n \\ emphasizes this contrast, with the creature glowing faintly from within, its\\\n \\ surface rippling with unnatural energy, while the figure below is lit more subtly,\\\n \\ grounding them in human vulnerability. The absence of a defined environment\\\n \\ enhances the existential tension of the image, as if this encounter is taking\\\n \\ place in a void or beyond reality itself. Themes of submission, fear, and cosmic\\\n \\ insignificance permeate the composition, with the creature\u00e2\\x80\\x99s overwhelming\\\n \\ scale and surreal presence dwarfing the human figure, evoking feelings of awe\\\n \\ and helplessness. The aesthetic combines elements of cosmic horror, surrealism,\\\n \\ and anime-inspired drama, creating a moment that feels frozen in time\u00e2\\x80\\x94\\\n both deeply personal and universally vast. 
The image captures the essence of a\\\n \\ climactic confrontation, leaving the viewer to question whether this is a moment\\\n \\ of transcendence, destruction, or something far beyond comprehension. anime,\\\n \\ cyberpunk style\""\n output:\n url: https://huggingface.co/Jovie/Midjourney/resolve/main/images/example_ubv32i84d.png"", ""transformersInfo"": null, ""_id"": ""66f0a8f4e5e475f8af6b1654"", ""modelId"": ""Jovie/Midjourney"", ""usedStorage"": 173438489}",0,"https://huggingface.co/future-technologies/Floral-High-Dynamic-Range, https://huggingface.co/gjP798uy/JourneyFluxDetailsRealism, https://huggingface.co/The-LoRa-Project/Fluxjourney.Realism",3,,0,,0,,0,"Deninski1/Jovie-Midjourney, IdenGhost/Jovie-Midjourney, KwameLaryea/Jovie-Midjourney, NativeAngels/Serverless-ImgGen-Hub, NeurixYUFI/imggen, Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1, Nymbo/Serverless-ImgGen-Hub, PeepDaSlan9/HYDRAS_Jovie-Midjourney, RugsAIAlpha/Jovie-Midjourney, colonsky/Jovie-Midjourney, fantaxy/playground25, theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1",12
|
| 64 |
+
future-technologies/Floral-High-Dynamic-Range,"---
|
| 65 |
+
license: apache-2.0
|
| 66 |
+
language:
|
| 67 |
+
- en
|
| 68 |
+
metrics:
|
| 69 |
+
- accuracy
|
| 70 |
+
- character
|
| 71 |
+
base_model:
|
| 72 |
+
- black-forest-labs/FLUX.1-dev
|
| 73 |
+
- Jovie/Midjourney
|
| 74 |
+
pipeline_tag: text-to-image
|
| 75 |
+
library_name: diffusers
|
| 76 |
+
tags:
|
| 77 |
+
- text-to-image
|
| 78 |
+
- image-generation
|
| 79 |
+
- floral
|
| 80 |
+
- High-Dynamic-Range
|
| 81 |
+
- Large-Image-Generation-Model
|
| 82 |
+
- custom_code
|
| 83 |
+
- bf16
|
| 84 |
+
- text-to-image
|
| 85 |
+
- diffusion
|
| 86 |
+
- AI
|
| 87 |
+
- art
|
| 88 |
+
- photorealistic
|
| 89 |
+
- image
|
| 90 |
+
- powerful
|
| 91 |
+
- future
|
| 92 |
+
datasets:
|
| 93 |
+
- future-technologies/Universal-Transformers-Dataset
|
| 94 |
+
---
|
| 95 |
+
|
| 96 |
+
<div style=""display: flex; justify-content: center;"">
|
| 97 |
+
<img src=""./floral-hdr-generation-output-example.png"" alt=""Floral HDR Image 1"" width=""150"" height=""150"">
|
| 98 |
+
<img src=""./floral-hdr-generation-output-example2.png"" alt=""Floral HDR Image 2"" width=""150"" height=""150"">
|
| 99 |
+
<img src=""./floral-hdr-generation-output-example3.png"" alt=""Floral HDR Image 3"" width=""150"" height=""150"">
|
| 100 |
+
<img src=""./floral-hdr-generation-output-example4.png"" alt=""Floral HDR Image 3"" width=""150"" height=""150"">
|
| 101 |
+
<img src=""./floral-hdr-generation-output-example5.png"" alt=""Floral HDR Image 3"" width=""150"" height=""150"">
|
| 102 |
+
</div>
|
| 103 |
+
|
| 104 |
+
**Floral High Dynamic Range (LIGM):**
|
| 105 |
+
|
| 106 |
+
> **If you appreciate this model's contribution to open-source AI, please consider liking it and following [Lambda Go](https://huggingface.co/future-technologies) for future updates.**
|
| 107 |
+
|
| 108 |
+
A Large Image Generation Model (LIGM) celebrated for its exceptional accuracy in generating high-quality, highly detailed scenes like never seen before! Derived from the groundbreaking Floral AI Model—renowned for its use in film generation—this model marks a milestone in image synthesis technology.
|
| 109 |
+
|
| 110 |
+
Created by: Future Technologies Limited
|
| 111 |
+
|
| 112 |
+
### Model Description
|
| 113 |
+
|
| 114 |
+
*Floral High Dynamic Range (LIGM) is a state-of-the-art Large Image Generation Model (LIGM) that excels in generating images with stunning clarity, precision, and intricate detail. Known for its high accuracy in producing hyper-realistic and aesthetically rich images, this model sets a new standard in image synthesis. Whether it's landscapes, objects, or scenes, Floral HDR brings to life visuals that are vivid, lifelike, and unmatched in quality.*
|
| 115 |
+
|
| 116 |
+
*Originally derived from the Floral AI Model, which has been successfully applied in film generation, Floral HDR integrates advanced techniques to handle complex lighting, dynamic ranges, and detailed scene compositions. This makes it ideal for applications where high-resolution imagery and realistic scene generation are critical.*
|
| 117 |
+
|
| 118 |
+
*Designed and developed by Future Technologies Limited, Floral HDR is a breakthrough achievement in AI-driven image generation, marking a significant leap in creative industries such as digital art, film, and immersive media. With the power to create images that push the boundaries of realism and artistic innovation, this model is a testament to Future Technologies Limited's commitment to shaping the future of AI.*
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
- **Developed by:** Future Technologies Limited (Lambda Go Technologies Limited)
|
| 123 |
+
- **Model type:** Large Image Generation Model
|
| 124 |
+
- **Language(s) (NLP):** English
|
| 125 |
+
- **License:** apache-2.0
|
| 126 |
+
|
| 127 |
+
# Crucial Notice:
|
| 128 |
+
This model is based on [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
|
| 129 |
+
but has been significantly modified and improved by Future Technologies Limited with our own dataset and enhancements, making it a unique and original model. Although it uses ```FluxPipeline```, the improvements make it distinct.
|
| 130 |
+
|
| 131 |
+
- **License:** This model is released under [Future Technologies License](https://huggingface.co/future-technologies/Floral-High-Dynamic-Range/blob/main/LICENSE.md)
|
| 132 |
+
, and is not subject to [black-forest-labs's](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) non-commercial restrictions. ***You can use, modify, and distribute it freely under [Future Technologies License](https://huggingface.co/future-technologies/Floral-High-Dynamic-Range/blob/main/LICENSE.md).***
|
| 133 |
+
|
| 134 |
+
- **Disclaimer:** While [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) is non-commercial, we’ve made enough changes to our model to make it an independent work. Please review our license before use.
|
| 135 |
+
|
| 136 |
+
For any licensing questions, contact us at ```lambda.go.company@gmail.com```
|
| 137 |
+
|
| 138 |
+
## Uses
|
| 139 |
+
|
| 140 |
+
**Film and Animation Studios**
|
| 141 |
+
|
| 142 |
+
- **Intended Users:** *Directors, animators, visual effects artists, and film production teams.*
|
| 143 |
+
- **Impact:** *This model empowers filmmakers to generate realistic scenes and environments with reduced reliance on traditional CGI and manual artistry. It provides faster production timelines and cost-effective solutions for creating complex visuals.*
|
| 144 |
+
|
| 145 |
+
**Game Developers**
|
| 146 |
+
|
| 147 |
+
- **Intended Users**: *Game designers, developers, and 3D artists.*
|
| 148 |
+
- **Impact:** *Floral HDR helps create highly detailed game worlds, characters, and assets. It allows developers to save time and resources, focusing on interactive elements while the model handles the visual richness of the environments. This can enhance game immersion and the overall player experience.*
|
| 149 |
+
|
| 150 |
+
**Virtual Reality (VR) and Augmented Reality (AR) Creators**
|
| 151 |
+
|
| 152 |
+
- **Intended Users:** *VR/AR developers, interactive media creators, and immersive experience designers.*
|
| 153 |
+
- **Impact:** *Users can quickly generate lifelike virtual environments, helping VR and AR applications appear more realistic and convincing. This is crucial for applications ranging from training simulations to entertainment.*
|
| 154 |
+
|
| 155 |
+
**Artists and Digital Designers**
|
| 156 |
+
|
| 157 |
+
- **Intended Users:** *Digital artists, illustrators, and graphic designers.*
|
| 158 |
+
- **Impact:** *Artists can use the model to generate high-quality visual elements, scenes, and concepts, pushing their creative boundaries. The model aids in visualizing complex artistic ideas in a faster, more efficient manner.*
|
| 159 |
+
|
| 160 |
+
**Marketing and Advertising Agencies**
|
| 161 |
+
|
| 162 |
+
- **Intended Users:** *Creative directors, marketers, advertising professionals, and content creators.*
|
| 163 |
+
- **Impact:** *Floral HDR enables agencies to create striking visuals for advertisements, product launches, and promotional materials. This helps businesses stand out in competitive markets by delivering high-impact imagery for campaigns.*
|
| 164 |
+
|
| 165 |
+
**Environmental and Scientific Researchers**
|
| 166 |
+
|
| 167 |
+
- **Intended Users:** *Environmental scientists, researchers, and visual data analysts.*
|
| 168 |
+
- **Impact:** *The model can simulate realistic environments, aiding in research areas like climate studies, ecosystem modeling, and scientific visualizations. It provides an accessible tool for researchers to communicate complex concepts through imagery.*
|
| 169 |
+
|
| 170 |
+
**Content Creators and Social Media Influencers**
|
| 171 |
+
|
| 172 |
+
- **Intended Users:** *Influencers, social media managers, and visual content creators.*
|
| 173 |
+
- **Impact:** *Social media professionals can create stunning and engaging content for their platforms with minimal effort. The model enhances the visual quality of posts, helping users build a more captivating online presence.*
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
### Out-of-Scope Use
|
| 177 |
+
|
| 178 |
+
**Generation of Misleading or Harmful Content**
|
| 179 |
+
|
| 180 |
+
- **Misuse:** The model should not be used to create fake, misleading, or harmful images intended to deceive individuals or manipulate public opinion (e.g., deepfakes, fake news visuals, or malicious propaganda).
|
| 181 |
+
- **Why It's Out-of-Scope:** The model generates high-fidelity imagery, and when used irresponsibly, it could perpetuate misinformation or mislead viewers into believing manipulated content is authentic.
|
| 182 |
+
|
| 183 |
+
**Creating Offensive, Discriminatory, or Inappropriate Images**
|
| 184 |
+
|
| 185 |
+
- **Misuse:** Generating content that is offensive, harmful, discriminatory, or violates ethical norms (e.g., hate speech, explicit content, or violence).
|
| 186 |
+
- **Why It's Out-of-Scope:** Floral HDR is designed to create visually rich and realistic images, and any generation that involves harmful themes goes against its ethical use, potentially causing harm or perpetuating negativity.
|
| 187 |
+
|
| 188 |
+
**Overly Sensitive or Personal Data Generation**
|
| 189 |
+
|
| 190 |
+
- **Misuse:** Generating images that involve identifiable individuals, private data, or exploit sensitive personal situations.
|
| 191 |
+
- **Why It's Out-of-Scope:** Using the model to simulate or generate sensitive, private, or identifiable personal content without consent violates privacy rights and can lead to harmful consequences for individuals involved.
|
| 192 |
+
|
| 193 |
+
**Incorporating in Systems for Autonomous Decision-Making**
|
| 194 |
+
|
| 195 |
+
- **Misuse:** Using the model in automated decision-making systems that could impact individuals' lives (e.g., in high-stakes domains like criminal justice, finance, or healthcare) without proper human oversight.
|
| 196 |
+
- **Why It's Out-of-Scope:** While the model generates high-quality visuals, it is not designed or trained for tasks requiring logical, contextual decision-making or ethical judgment, and may lead to errors or harmful outcomes when used in these contexts.
|
| 197 |
+
|
| 198 |
+
**Large-Scale Commercial Use Without Licensing**
|
| 199 |
+
|
| 200 |
+
- **Misuse:** Utilizing the model to produce images for large-scale commercial purposes without adhering to licensing and ethical guidelines, including the redistribution or resale of generated images as standalone assets.
|
| 201 |
+
- **Why It's Out-of-Scope:** The model is not intended to replace artists or designers in creating commercial products at scale unless appropriate licensing and commercial usage policies are in place.
|
| 202 |
+
|
| 203 |
+
**Generating Unethical or Inaccurate Scientific/Medical Content**
|
| 204 |
+
|
| 205 |
+
- **Misuse:** Using the model to generate scientific, medical, or educational content that could lead to false or harmful interpretations of real-world data.
|
| 206 |
+
- **Why It's Out-of-Scope:** The model’s capabilities are focused on creative and artistic image generation, not on generating scientifically or medically accurate content, which requires domain-specific expertise.
|
| 207 |
+
|
| 208 |
+
**Real-Time Interactivity in Live Environments**
|
| 209 |
+
|
| 210 |
+
- **Misuse:** Using the model for real-time, interactive image generation in live environments (e.g., live-streaming or real-time gaming) where speed and consistency are critical, without proper optimization.
|
| 211 |
+
- **Why It's Out-of-Scope:** The model is designed for high-quality image generation but may not perform efficiently or effectively for live, real-time interactions, where real-time rendering and low latency are essential.
|
| 212 |
+
|
| 213 |
+
## Bias, Risks, and Limitations
|
| 214 |
+
|
| 215 |
+
- **Cultural Bias:** The model may generate images that are more reflective of dominant cultures, potentially underrepresenting minority cultures, though it can still create diverse visual content when properly guided.
|
| 216 |
+
- **Gender and Racial Bias:** The model might produce stereotypical representations based on gender or race, but it is capable of generating diverse and inclusive imagery when trained with diverse datasets.
|
| 217 |
+
- **Over-simplification:** In certain cases, the model might oversimplify complex scenarios or settings, reducing intricate details that may be crucial in highly specialized fields, while still excelling in creative visual tasks.
|
| 218 |
+
- **Unintended Interpretations:** The model may generate images that are open to misinterpretation, but it can be adjusted and refined to ensure better alignment with user intent without losing its creative potential.
|
| 219 |
+
- **Abstract and Conceptual Limitations:** While the model is adept at generating realistic imagery, it may struggle to visualize abstract or conceptual ideas in the same way it handles realistic or tangible subjects. However, it can still generate impressive, visually appealing concepts.
|
| 220 |
+
|
| 221 |
+
### Recommendations
|
| 222 |
+
|
| 223 |
+
- **Awareness of Bias:** Users should be mindful of the potential cultural, racial, and gender biases that may appear in generated content. It’s important to actively curate and diversify training datasets or input prompts to minimize such biases.
|
| 224 |
+
|
| 225 |
+
- **Responsible Use:** Users should ensure that the model is used in ways that promote positive, constructive, and inclusive imagery. For projects involving sensitive or personal content, human oversight is recommended to avoid misrepresentation or harm.
|
| 226 |
+
|
| 227 |
+
- **Verification and Fact-Checking:** Given the model’s inability to provide accurate domain-specific knowledge, users should verify the accuracy of the generated content in fields requiring high precision, such as scientific, medical, or historical images.
|
| 228 |
+
|
| 229 |
+
- **Contextual Refinement:** Since the model doesn’t inherently understand context, users should carefully refine prompts to avoid misaligned or inappropriate outputs, especially in creative fields where subtlety and nuance are critical.
|
| 230 |
+
|
| 231 |
+
- **Ethical and Responsible Use:** Users must ensure that the model is not exploited for harmful purposes such as generating misleading content, deepfakes, or offensive imagery. Ethical guidelines and responsible practices should be followed in all use cases.
|
| 232 |
+
|
| 233 |
+
## How to Get Started with the Model
|
| 234 |
+
**Prerequisites:**
|
| 235 |
+
- **Install necessary libraries:**
|
| 236 |
+
```
|
| 237 |
+
pip install transformers diffusers torch Pillow huggingface_hub PIL io requests
|
| 238 |
+
```
|
| 239 |
+
- **Code to Use the Model:**
|
| 240 |
+
|
| 241 |
+
```
|
| 242 |
+
from transformers import AutoTokenizer #, AutoModelForImageGeneration
|
| 243 |
+
from diffusers import DiffusionPipeline, FluxPipeline
|
| 244 |
+
import torch
|
| 245 |
+
from PIL import Image
|
| 246 |
+
import requests
|
| 247 |
+
from io import BytesIO
|
| 248 |
+
|
| 249 |
+
# Your Hugging Face API token
|
| 250 |
+
API_TOKEN = ""<retacted>""
|
| 251 |
+
|
| 252 |
+
# Load the model and tokenizer from Hugging Face
|
| 253 |
+
model_name = ""future-technologies/Floral-High-Dynamic-Range""
|
| 254 |
+
#model_name = ""black-forest-labs/FLUX.1-dev""
|
| 255 |
+
|
| 256 |
+
# Error handling for model loading
|
| 257 |
+
try:
|
| 258 |
+
#model = AutoModelForImageGeneration.from_pretrained(model_name, use_auth_token=API_TOKEN)
|
| 259 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, token=API_TOKEN)
|
| 260 |
+
except Exception as e:
|
| 261 |
+
print(f""Error loading model: {e}"")
|
| 262 |
+
exit()
|
| 263 |
+
|
| 264 |
+
# Initialize the diffusion pipeline
|
| 265 |
+
try:
|
| 266 |
+
pipe = FluxPipeline.from_pretrained(model_name)
|
| 267 |
+
pipe.to(""cuda"" if torch.cuda.is_available() else ""cpu"")
|
| 268 |
+
except Exception as e:
|
| 269 |
+
print(f""Error initializing pipeline: {e}"")
|
| 270 |
+
exit()
|
| 271 |
+
|
| 272 |
+
# Example prompt for image generation
|
| 273 |
+
prompt = ""A futuristic city skyline with glowing skyscrapers during sunset, reflecting the light.""
|
| 274 |
+
|
| 275 |
+
# Error handling for image generation
|
| 276 |
+
try:
|
| 277 |
+
result = pipe(prompt)
|
| 278 |
+
image = result.images[0]
|
| 279 |
+
except Exception as e:
|
| 280 |
+
print(f""Error generating image: {e}"")
|
| 281 |
+
exit()
|
| 282 |
+
|
| 283 |
+
# Save or display the image
|
| 284 |
+
try:
|
| 285 |
+
image.save(""floral-hdr.png"")
|
| 286 |
+
image.show()
|
| 287 |
+
except Exception as e:
|
| 288 |
+
print(f""Error saving or displaying image: {e}"")
|
| 289 |
+
exit()
|
| 290 |
+
|
| 291 |
+
print(""Image generation and saving successful!"")
|
| 292 |
+
```
|
| 293 |
+
|
| 294 |
+
## Training Details
|
| 295 |
+
|
| 296 |
+
The **Floral High Dynamic Range (LIGM)** model has been trained on a diverse and extensive dataset containing over 1 billion high-quality images. This vast dataset encompasses a wide range of visual styles and content, enabling the model to generate highly detailed and accurate images. The training process focused on capturing intricate features, dynamic lighting, and complex scenes, which allows the model to produce images with stunning realism and creative potential.
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
#### Training Hyperparameters
|
| 300 |
+
|
| 301 |
+
- **Training regime:** bf16 non-mixed precision <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 302 |
+
|
| 303 |
+
## Environmental Impact
|
| 304 |
+
|
| 305 |
+
- **Hardware Type:** Nvidia A100 GPU
|
| 306 |
+
- **Hours used:** 45k+
|
| 307 |
+
- **Cloud Provider:** Future Technologies Limited
|
| 308 |
+
- **Compute Region:** Rajasthan, India
|
| 309 |
+
- **Carbon Emitted:** 0 (Powered by clean Solar Energy with no harmful or polluting machines used. Environmentally sustainable and eco-friendly!)","{""id"": ""future-technologies/Floral-High-Dynamic-Range"", ""author"": ""future-technologies"", ""sha"": ""52ae912f8d947e4a49d439ae08facdc4faeec8c5"", ""last_modified"": ""2025-03-29 09:56:32+00:00"", ""created_at"": ""2025-01-03 08:48:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 843, ""downloads_all_time"": null, ""likes"": 8, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-image"", ""image-generation"", ""floral"", ""High-Dynamic-Range"", ""Large-Image-Generation-Model"", ""custom_code"", ""bf16"", ""diffusion"", ""AI"", ""art"", ""photorealistic"", ""image"", ""powerful"", ""future"", ""en"", ""dataset:future-technologies/Universal-Transformers-Dataset"", ""base_model:Jovie/Midjourney"", ""base_model:finetune:Jovie/Midjourney"", ""doi:10.57967/hf/4664"", ""license:apache-2.0"", ""endpoints_compatible"", ""diffusers:FluxPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- black-forest-labs/FLUX.1-dev\n- Jovie/Midjourney\ndatasets:\n- future-technologies/Universal-Transformers-Dataset\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- accuracy\n- character\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- image-generation\n- floral\n- High-Dynamic-Range\n- Large-Image-Generation-Model\n- custom_code\n- bf16\n- diffusion\n- AI\n- art\n- photorealistic\n- image\n- powerful\n- future"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""FluxPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ae.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='floral-hdr-generation-output-example.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='floral-hdr-generation-output-example2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='floral-hdr-generation-output-example3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='floral-hdr-generation-output-example4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='floral-hdr-generation-output-example5.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 11891178560}, ""total"": 11891178560}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-29 09:56:32+00:00"", ""cardData"": ""base_model:\n- black-forest-labs/FLUX.1-dev\n- Jovie/Midjourney\ndatasets:\n- future-technologies/Universal-Transformers-Dataset\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\nmetrics:\n- accuracy\n- character\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- image-generation\n- floral\n- High-Dynamic-Range\n- Large-Image-Generation-Model\n- custom_code\n- bf16\n- diffusion\n- AI\n- art\n- photorealistic\n- image\n- powerful\n- future"", ""transformersInfo"": null, ""_id"": 
""6777a4518fcda5708e098dd2"", ""modelId"": ""future-technologies/Floral-High-Dynamic-Range"", ""usedStorage"": 58049489362}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/109,1
|
| 310 |
+
gjP798uy/JourneyFluxDetailsRealism,"---
|
| 311 |
+
license: apache-2.0
|
| 312 |
+
language:
|
| 313 |
+
- en
|
| 314 |
+
- es
|
| 315 |
+
- fr
|
| 316 |
+
base_model:
|
| 317 |
+
- kudzueye/Boreal
|
| 318 |
+
- adirik/flux-cinestill
|
| 319 |
+
- Shakker-Labs/FLUX.1-dev-LoRA-add-details
|
| 320 |
+
- prithivMLmods/Flux-Realism-FineDetailed
|
| 321 |
+
- prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0
|
| 322 |
+
- Jovie/Midjourney
|
| 323 |
+
pipeline_tag: text-to-image
|
| 324 |
+
---","{""id"": ""gjP798uy/JourneyFluxDetailsRealism"", ""author"": ""gjP798uy"", ""sha"": ""6126d4f08382b3fbd410a7a8c0e3b1891014178d"", ""last_modified"": ""2025-01-24 16:52:35+00:00"", ""created_at"": ""2025-01-22 21:47:12+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-to-image"", ""en"", ""es"", ""fr"", ""base_model:Jovie/Midjourney"", ""base_model:finetune:Jovie/Midjourney"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- kudzueye/Boreal\n- adirik/flux-cinestill\n- Shakker-Labs/FLUX.1-dev-LoRA-add-details\n- prithivMLmods/Flux-Realism-FineDetailed\n- prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0\n- Jovie/Midjourney\nlanguage:\n- en\n- es\n- fr\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-24 16:52:35+00:00"", ""cardData"": ""base_model:\n- kudzueye/Boreal\n- adirik/flux-cinestill\n- Shakker-Labs/FLUX.1-dev-LoRA-add-details\n- prithivMLmods/Flux-Realism-FineDetailed\n- prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0\n- Jovie/Midjourney\nlanguage:\n- en\n- es\n- fr\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""transformersInfo"": null, ""_id"": ""67916760565b78249b211dbd"", ""modelId"": ""gjP798uy/JourneyFluxDetailsRealism"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=gjP798uy/JourneyFluxDetailsRealism&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BgjP798uy%2FJourneyFluxDetailsRealism%5D(%2FgjP798uy%2FJourneyFluxDetailsRealism)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 325 |
+
https://huggingface.co/The-LoRa-Project/Fluxjourney.Realism,N/A,N/A,1,,0,,0,,0,,0,,0
|
MiniCPM-o-2_6_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,1565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
openbmb/MiniCPM-o-2_6,"---
|
| 3 |
+
pipeline_tag: any-to-any
|
| 4 |
+
datasets:
|
| 5 |
+
- openbmb/RLAIF-V-Dataset
|
| 6 |
+
library_name: transformers
|
| 7 |
+
language:
|
| 8 |
+
- multilingual
|
| 9 |
+
tags:
|
| 10 |
+
- minicpm-o
|
| 11 |
+
- omni
|
| 12 |
+
- vision
|
| 13 |
+
- ocr
|
| 14 |
+
- multi-image
|
| 15 |
+
- video
|
| 16 |
+
- custom_code
|
| 17 |
+
- audio
|
| 18 |
+
- speech
|
| 19 |
+
- voice cloning
|
| 20 |
+
- live Streaming
|
| 21 |
+
- realtime speech conversation
|
| 22 |
+
- asr
|
| 23 |
+
- tts
|
| 24 |
+
---
|
| 25 |
+
|
| 26 |
+
<h1>A GPT-4o Level MLLM for Vision, Speech and Multimodal Live Streaming on Your Phone</h1>
|
| 27 |
+
|
| 28 |
+
[GitHub](https://github.com/OpenBMB/MiniCPM-o) | [Online Demo](https://minicpm-omni-webdemo-us.modelbest.cn) | [Technical Blog](https://openbmb.notion.site/MiniCPM-o-2-6-A-GPT-4o-Level-MLLM-for-Vision-Speech-and-Multimodal-Live-Streaming-on-Your-Phone-185ede1b7a558042b5d5e45e6b237da9)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
### News
|
| 32 |
+
|
| 33 |
+
* [2025.03.01] 🚀🚀🚀 RLAIF-V, which is the alignment technique of MiniCPM-o, is accepted by CVPR 2025!The [code](https://github.com/RLHF-V/RLAIF-V), [dataset](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset), [paper](https://arxiv.org/abs/2405.17220) are open-sourced!
|
| 34 |
+
|
| 35 |
+
* [2025.01.24] 📢📢📢 MiniCPM-o 2.6 technical report is released! [See Here](https://openbmb.notion.site/MiniCPM-o-2-6-A-GPT-4o-Level-MLLM-for-Vision-Speech-and-Multimodal-Live-Streaming-on-Your-Phone-185ede1b7a558042b5d5e45e6b237da9).
|
| 36 |
+
|
| 37 |
+
* [2025.01.19] ⭐️⭐️⭐️ MiniCPM-o tops GitHub Trending and reaches top-2 on Hugging Face Trending!
|
| 38 |
+
|
| 39 |
+
## MiniCPM-o 2.6
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
**MiniCPM-o 2.6** is the latest and most capable model in the MiniCPM-o series. The model is built in an end-to-end fashion based on SigLip-400M, Whisper-medium-300M, ChatTTS-200M, and Qwen2.5-7B with a total of 8B parameters. It exhibits a significant performance improvement over MiniCPM-V 2.6, and introduces new features for real-time speech conversation and multimodal live streaming. Notable features of MiniCPM-o 2.6 include:
|
| 43 |
+
|
| 44 |
+
- 🔥 **Leading Visual Capability.**
|
| 45 |
+
MiniCPM-o 2.6 achieves an average score of 70.2 on OpenCompass, a comprehensive evaluation over 8 popular benchmarks. **With only 8B parameters, it surpasses widely used proprietary models like GPT-4o-202405, Gemini 1.5 Pro, and Claude 3.5 Sonnet** for single image understanding. It also **outperforms GPT-4V and Claude 3.5 Sonnet** in mutli-image and video understanding, and shows promising in-context learning capability.
|
| 46 |
+
|
| 47 |
+
- 🎙 **State-of-the-art Speech Capability.** MiniCPM-o 2.6 supports **bilingual real-time speech conversation with configurable voices** in English and Chinese. It **outperforms GPT-4o-realtime on audio understanding tasks** such as ASR and STT translation, and shows **state-of-the-art performance on speech conversation in both semantic and acoustic evaluations in the open-source community**. It also allows for fun features such as emotion/speed/style control, end-to-end voice cloning, role play, etc.
|
| 48 |
+
|
| 49 |
+
- 🎬 **Strong Multimodal Live Streaming Capability.** As a new feature, MiniCPM-o 2.6 can **accept continous video and audio streams independent of user queries, and support real-time speech interaction**. It **outperforms GPT-4o-202408 and Claude 3.5 Sonnet and shows state-of-art performance in open-source community on StreamingBench**, a comprehensive benchmark for real-time video understanding, omni-source (video & audio) understanding, and multimodal contextual understanding.
|
| 50 |
+
|
| 51 |
+
- 💪 **Strong OCR Capability and Others.**
|
| 52 |
+
Advancing popular visual capabilites from MiniCPM-V series, MiniCPM-o 2.6 can process images with any aspect ratio and up to 1.8 million pixels (e.g., 1344x1344). It achieves **state-of-the-art performance on OCRBench for models under 25B, surpassing proprietary models such as GPT-4o-202405**.
|
| 53 |
+
Based on the the latest [RLAIF-V](https://github.com/RLHF-V/RLAIF-V/) and [VisCPM](https://github.com/OpenBMB/VisCPM) techniques, it features **trustworthy behaviors**, outperforming GPT-4o and Claude 3.5 Sonnet on MMHal-Bench, and supports **multilingual capabilities** on more than 30 languages.
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
- 🚀 **Superior Efficiency.**
|
| 57 |
+
In addition to its friendly size, MiniCPM-o 2.6 also shows **state-of-the-art token density** (i.e., number of pixels encoded into each visual token). **It produces only 640 tokens when processing a 1.8M pixel image, which is 75% fewer than most models**. This directly improves the inference speed, first-token latency, memory usage, and power consumption. As a result, MiniCPM-o 2.6 can efficiently support **multimodal live streaming** on end-side devices such as iPad.
|
| 58 |
+
|
| 59 |
+
- 💫 **Easy Usage.**
|
| 60 |
+
MiniCPM-o 2.6 can be easily used in various ways: (1) [llama.cpp](https://github.com/OpenBMB/llama.cpp/blob/minicpm-omni/examples/llava/README-minicpmo2.6.md) support for efficient CPU inference on local devices, (2) [int4](https://huggingface.co/openbmb/MiniCPM-o-2_6-int4) and [GGUF](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) format quantized models in 16 sizes, (3) [vLLM](#efficient-inference-with-llamacpp-ollama-vllm) support for high-throughput and memory-efficient inference, (4) fine-tuning on new domains and tasks with [LLaMA-Factory](./docs/llamafactory_train.md), (5) quick local WebUI demo setup with [Gradio](#chat-with-our-demo-on-gradio), and (6) online web demo on [server](https://minicpm-omni-webdemo-us.modelbest.cn/).
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
**Model Architecture.**
|
| 65 |
+
|
| 66 |
+
- **End-to-end Omni-modal Architecture.** Different modality encoder/decoders are connected and trained in an **end-to-end** fashion to fully exploit rich multimodal knowledge.
|
| 67 |
+
- **Omni-modal Live Streaming Mechanism.** (1) We change the offline modality encoder/decoders into online ones for **streaminig inputs/outputs.** (2) We devise a **time-division multiplexing (TDM) mechanism** for omni-modality streaminig processing in the LLM backbone. It divides parallel omni-modality streams into sequential info within small periodic time slices.
|
| 68 |
+
- **Configurable Speech Modeling Design.** We devise a multimodal system prompt, including traditional text system prompt, and **a new audio system prompt to determine the assistant voice**. This enables flexible voice configurations in inference time, and also facilitates end-to-end voice cloning and description-based voice creation.
|
| 69 |
+
|
| 70 |
+
<div align=""center"">
|
| 71 |
+
<img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/minicpm-o-26-framework-v2.png"" , width=100%>
|
| 72 |
+
</div>
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
### Evaluation <!-- omit in toc -->
|
| 76 |
+
|
| 77 |
+
<div align=""center"">
|
| 78 |
+
<img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/radar.jpg"" width=90% />
|
| 79 |
+
</div>
|
| 80 |
+
|
| 81 |
+
#### Visual understanding results
|
| 82 |
+
|
| 83 |
+
**Image Understanding:**
|
| 84 |
+
|
| 85 |
+
<div align=""center"">
|
| 86 |
+
<table style=""margin: 0px auto;"">
|
| 87 |
+
<thead>
|
| 88 |
+
<tr>
|
| 89 |
+
<th align=""left"">Model</th>
|
| 90 |
+
<th>Size</th>
|
| 91 |
+
<th>Token Density<sup>+</sup></th>
|
| 92 |
+
<th>OpenCompass</th>
|
| 93 |
+
<th>OCRBench</th>
|
| 94 |
+
<th>MathVista mini</th>
|
| 95 |
+
<th>ChartQA</th>
|
| 96 |
+
<th>MMVet</th>
|
| 97 |
+
<th>MMStar</th>
|
| 98 |
+
<th>MME</th>
|
| 99 |
+
<th>MMB1.1 test</th>
|
| 100 |
+
<th>AI2D</th>
|
| 101 |
+
<th>MMMU val</th>
|
| 102 |
+
<th>HallusionBench</th>
|
| 103 |
+
<th>TextVQA val</th>
|
| 104 |
+
<th>DocVQA test</th>
|
| 105 |
+
<th>MathVerse mini</th>
|
| 106 |
+
<th>MathVision</th>
|
| 107 |
+
<th>MMHal Score</th>
|
| 108 |
+
</tr>
|
| 109 |
+
</thead>
|
| 110 |
+
<tbody align=""center"">
|
| 111 |
+
<tr>
|
| 112 |
+
<td colspan=""19"" align=""left""><strong>Proprietary</strong></td>
|
| 113 |
+
</tr>
|
| 114 |
+
<tr>
|
| 115 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-20240513</td>
|
| 116 |
+
<td>-</td>
|
| 117 |
+
<td>1088</td>
|
| 118 |
+
<td><u>69.9</u></td>
|
| 119 |
+
<td>736</td>
|
| 120 |
+
<td>61.3</td>
|
| 121 |
+
<td>85.7</td>
|
| 122 |
+
<td><strong>69.1</strong></td>
|
| 123 |
+
<td>63.9</td>
|
| 124 |
+
<td>2328.7</td>
|
| 125 |
+
<td>82.2</td>
|
| 126 |
+
<td>84.6</td>
|
| 127 |
+
<td><strong>69.2</strong></td>
|
| 128 |
+
<td><strong>55.0</strong></td>
|
| 129 |
+
<td>-</td>
|
| 130 |
+
<td>92.8</td>
|
| 131 |
+
<td><strong>50.2</strong></td>
|
| 132 |
+
<td><strong>30.4</strong></td>
|
| 133 |
+
<td><u>3.6</u></td>
|
| 134 |
+
</tr>
|
| 135 |
+
<tr>
|
| 136 |
+
<td nowrap=""nowrap"" align=""left"">Claude3.5-Sonnet</td>
|
| 137 |
+
<td>-</td>
|
| 138 |
+
<td>750</td>
|
| 139 |
+
<td>67.9</td>
|
| 140 |
+
<td>788</td>
|
| 141 |
+
<td>61.6</td>
|
| 142 |
+
<td><strong>90.8</strong></td>
|
| 143 |
+
<td>66.0</td>
|
| 144 |
+
<td>62.2</td>
|
| 145 |
+
<td>1920.0</td>
|
| 146 |
+
<td>78.5</td>
|
| 147 |
+
<td>80.2</td>
|
| 148 |
+
<td><u>65.9</u></td>
|
| 149 |
+
<td>49.9</td>
|
| 150 |
+
<td>-</td>
|
| 151 |
+
<td><strong>95.2</strong></td>
|
| 152 |
+
<td>-</td>
|
| 153 |
+
<td>-</td>
|
| 154 |
+
<td>3.4</td>
|
| 155 |
+
</tr>
|
| 156 |
+
<tr>
|
| 157 |
+
<td nowrap=""nowrap"" align=""left"">Gemini 1.5 Pro</td>
|
| 158 |
+
<td>-</td>
|
| 159 |
+
<td>-</td>
|
| 160 |
+
<td>64.4</td>
|
| 161 |
+
<td>754</td>
|
| 162 |
+
<td>57.7</td>
|
| 163 |
+
<td>81.3</td>
|
| 164 |
+
<td>64.0</td>
|
| 165 |
+
<td>59.1</td>
|
| 166 |
+
<td>2110.6</td>
|
| 167 |
+
<td>73.9</td>
|
| 168 |
+
<td>79.1</td>
|
| 169 |
+
<td>60.6</td>
|
| 170 |
+
<td>45.6</td>
|
| 171 |
+
<td>73.5</td>
|
| 172 |
+
<td>86.5</td>
|
| 173 |
+
<td>-</td>
|
| 174 |
+
<td>19.2</td>
|
| 175 |
+
<td>-</td>
|
| 176 |
+
</tr>
|
| 177 |
+
<tr>
|
| 178 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-mini-20240718</td>
|
| 179 |
+
<td>-</td>
|
| 180 |
+
<td>1088</td>
|
| 181 |
+
<td>64.1</td>
|
| 182 |
+
<td>785</td>
|
| 183 |
+
<td>52.4</td>
|
| 184 |
+
<td>-</td>
|
| 185 |
+
<td>66.9</td>
|
| 186 |
+
<td>54.8</td>
|
| 187 |
+
<td>2003.4</td>
|
| 188 |
+
<td>76.0</td>
|
| 189 |
+
<td>77.8</td>
|
| 190 |
+
<td>60.0</td>
|
| 191 |
+
<td>46.1</td>
|
| 192 |
+
<td>-</td>
|
| 193 |
+
<td>-</td>
|
| 194 |
+
<td>-</td>
|
| 195 |
+
<td>-</td>
|
| 196 |
+
<td>3.3</td>
|
| 197 |
+
</tr>
|
| 198 |
+
<tr>
|
| 199 |
+
<td colspan=""19"" align=""left""><strong>Open Source</strong></td>
|
| 200 |
+
</tr>
|
| 201 |
+
<tr>
|
| 202 |
+
<td nowrap=""nowrap"" align=""left"">Cambrian-34B</td>
|
| 203 |
+
<td>34B</td>
|
| 204 |
+
<td><u>1820</u></td>
|
| 205 |
+
<td>58.3</td>
|
| 206 |
+
<td>591</td>
|
| 207 |
+
<td>50.3</td>
|
| 208 |
+
<td>75.6</td>
|
| 209 |
+
<td>53.2</td>
|
| 210 |
+
<td>54.2</td>
|
| 211 |
+
<td>2049.9</td>
|
| 212 |
+
<td>77.8</td>
|
| 213 |
+
<td>79.5</td>
|
| 214 |
+
<td>50.4</td>
|
| 215 |
+
<td>41.6</td>
|
| 216 |
+
<td>76.7</td>
|
| 217 |
+
<td>75.5</td>
|
| 218 |
+
<td>-</td>
|
| 219 |
+
<td>-</td>
|
| 220 |
+
<td>-</td>
|
| 221 |
+
</tr>
|
| 222 |
+
<tr>
|
| 223 |
+
<td nowrap=""nowrap"" align=""left"">GLM-4V-9B</td>
|
| 224 |
+
<td>13B</td>
|
| 225 |
+
<td>784</td>
|
| 226 |
+
<td>59.1</td>
|
| 227 |
+
<td>776</td>
|
| 228 |
+
<td>51.1</td>
|
| 229 |
+
<td>-</td>
|
| 230 |
+
<td>58.0</td>
|
| 231 |
+
<td>54.8</td>
|
| 232 |
+
<td>2018.8</td>
|
| 233 |
+
<td>67.9</td>
|
| 234 |
+
<td>71.2</td>
|
| 235 |
+
<td>46.9</td>
|
| 236 |
+
<td>45.0</td>
|
| 237 |
+
<td>-</td>
|
| 238 |
+
<td>-</td>
|
| 239 |
+
<td>-</td>
|
| 240 |
+
<td>-</td>
|
| 241 |
+
<td>-</td>
|
| 242 |
+
</tr>
|
| 243 |
+
<tr>
|
| 244 |
+
<td nowrap=""nowrap"" align=""left"">Pixtral-12B</td>
|
| 245 |
+
<td>12B</td>
|
| 246 |
+
<td>256</td>
|
| 247 |
+
<td>61.0</td>
|
| 248 |
+
<td>685</td>
|
| 249 |
+
<td>56.9</td>
|
| 250 |
+
<td>81.8</td>
|
| 251 |
+
<td>58.5</td>
|
| 252 |
+
<td>54.5</td>
|
| 253 |
+
<td>-</td>
|
| 254 |
+
<td>72.7</td>
|
| 255 |
+
<td>79.0</td>
|
| 256 |
+
<td>51.1</td>
|
| 257 |
+
<td>47.0</td>
|
| 258 |
+
<td>75.7</td>
|
| 259 |
+
<td>90.7</td>
|
| 260 |
+
<td>-</td>
|
| 261 |
+
<td>-</td>
|
| 262 |
+
<td>-</td>
|
| 263 |
+
</tr>
|
| 264 |
+
<tr>
|
| 265 |
+
<td nowrap=""nowrap"" align=""left"">DeepSeek-VL2-27B (4B)</td>
|
| 266 |
+
<td>27B</td>
|
| 267 |
+
<td>672</td>
|
| 268 |
+
<td>66.4</td>
|
| 269 |
+
<td>809</td>
|
| 270 |
+
<td>63.9</td>
|
| 271 |
+
<td>86.0</td>
|
| 272 |
+
<td>60.0</td>
|
| 273 |
+
<td>61.9</td>
|
| 274 |
+
<td>2253.0</td>
|
| 275 |
+
<td>81.2</td>
|
| 276 |
+
<td>83.8</td>
|
| 277 |
+
<td>54.0</td>
|
| 278 |
+
<td>45.3</td>
|
| 279 |
+
<td><u>84.2</u></td>
|
| 280 |
+
<td>93.3</td>
|
| 281 |
+
<td>-</td>
|
| 282 |
+
<td>-</td>
|
| 283 |
+
<td>3.0</td>
|
| 284 |
+
</tr>
|
| 285 |
+
<tr>
|
| 286 |
+
<td nowrap=""nowrap"" align=""left"">Qwen2-VL-7B</td>
|
| 287 |
+
<td>8B</td>
|
| 288 |
+
<td>784</td>
|
| 289 |
+
<td>67.1</td>
|
| 290 |
+
<td><u>866</u></td>
|
| 291 |
+
<td>58.2</td>
|
| 292 |
+
<td>83.0</td>
|
| 293 |
+
<td>62.0</td>
|
| 294 |
+
<td>60.7</td>
|
| 295 |
+
<td>2326.0</td>
|
| 296 |
+
<td>81.8</td>
|
| 297 |
+
<td>83.0</td>
|
| 298 |
+
<td>54.1</td>
|
| 299 |
+
<td>50.6</td>
|
| 300 |
+
<td><strong>84.3</strong></td>
|
| 301 |
+
<td><u>94.5</u></td>
|
| 302 |
+
<td>31.9</td>
|
| 303 |
+
<td>16.3</td>
|
| 304 |
+
<td>3.2</td>
|
| 305 |
+
</tr>
|
| 306 |
+
<tr>
|
| 307 |
+
<td nowrap=""nowrap"" align=""left"">LLaVA-OneVision-72B</td>
|
| 308 |
+
<td>72B</td>
|
| 309 |
+
<td>182</td>
|
| 310 |
+
<td>68.1</td>
|
| 311 |
+
<td>741</td>
|
| 312 |
+
<td>67.5</td>
|
| 313 |
+
<td>83.7</td>
|
| 314 |
+
<td>60.6</td>
|
| 315 |
+
<td><strong>65.8</strong></td>
|
| 316 |
+
<td>2261.0</td>
|
| 317 |
+
<td><strong>85.0</strong></td>
|
| 318 |
+
<td><u>85.6</u></td>
|
| 319 |
+
<td>56.8</td>
|
| 320 |
+
<td>49.0</td>
|
| 321 |
+
<td>80.5</td>
|
| 322 |
+
<td>91.3</td>
|
| 323 |
+
<td>39.1</td>
|
| 324 |
+
<td>-</td>
|
| 325 |
+
<td>3.5</td>
|
| 326 |
+
</tr>
|
| 327 |
+
<tr>
|
| 328 |
+
<td nowrap=""nowrap"" align=""left"">InternVL2.5-8B</td>
|
| 329 |
+
<td>8B</td>
|
| 330 |
+
<td>706</td>
|
| 331 |
+
<td>68.3</td>
|
| 332 |
+
<td>822</td>
|
| 333 |
+
<td><u>64.4</u></td>
|
| 334 |
+
<td>84.8</td>
|
| 335 |
+
<td>62.8</td>
|
| 336 |
+
<td>62.8</td>
|
| 337 |
+
<td>2344.0</td>
|
| 338 |
+
<td><u>83.6</u></td>
|
| 339 |
+
<td>84.5</td>
|
| 340 |
+
<td>56.0</td>
|
| 341 |
+
<td>50.1</td>
|
| 342 |
+
<td>79.1</td>
|
| 343 |
+
<td>93.0</td>
|
| 344 |
+
<td>39.5</td>
|
| 345 |
+
<td>19.7</td>
|
| 346 |
+
<td>3.4</td>
|
| 347 |
+
</tr>
|
| 348 |
+
<tr>
|
| 349 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-V 2.6</td>
|
| 350 |
+
<td>8B</td>
|
| 351 |
+
<td><strong>2822</strong></td>
|
| 352 |
+
<td>65.2</td>
|
| 353 |
+
<td>852*</td>
|
| 354 |
+
<td>60.6</td>
|
| 355 |
+
<td>79.4</td>
|
| 356 |
+
<td>60.0</td>
|
| 357 |
+
<td>57.5</td>
|
| 358 |
+
<td><u>2348.4*</u></td>
|
| 359 |
+
<td>78.0</td>
|
| 360 |
+
<td>82.1</td>
|
| 361 |
+
<td>49.8*</td>
|
| 362 |
+
<td>48.1*</td>
|
| 363 |
+
<td>80.1</td>
|
| 364 |
+
<td>90.8</td>
|
| 365 |
+
<td>25.7</td>
|
| 366 |
+
<td>18.3</td>
|
| 367 |
+
<td>3.6</td>
|
| 368 |
+
</tr>
|
| 369 |
+
<tr>
|
| 370 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 371 |
+
<td>8B</td>
|
| 372 |
+
<td><strong>2822</strong></td>
|
| 373 |
+
<td><strong>70.2</strong></td>
|
| 374 |
+
<td><strong>897*</strong></td>
|
| 375 |
+
<td><strong>71.9*</strong></td>
|
| 376 |
+
<td><u>86.9*</u></td>
|
| 377 |
+
<td><u>67.5</u></td>
|
| 378 |
+
<td><u>64.0</u></td>
|
| 379 |
+
<td><strong>2372.0*</strong></td>
|
| 380 |
+
<td>80.5</td>
|
| 381 |
+
<td><strong>85.8</strong></td>
|
| 382 |
+
<td>50.4*</td>
|
| 383 |
+
<td><u>51.9</u></td>
|
| 384 |
+
<td>82.0</td>
|
| 385 |
+
<td>93.5</td>
|
| 386 |
+
<td><u>41.4*</u></td>
|
| 387 |
+
<td><u>23.1*</u></td>
|
| 388 |
+
<td><strong>3.8</strong></td>
|
| 389 |
+
</tr>
|
| 390 |
+
</tbody>
|
| 391 |
+
</table>
|
| 392 |
+
</div>
|
| 393 |
+
* We evaluate this benchmark using chain-of-thought prompting. Specifically, for MME, we used this technique only for the Cognition set.
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
<sup>+</sup> Token Density: number of pixels encoded into each visual token at maximum resolution, i.e., # pixels at maximum resolution / # visual tokens.
|
| 397 |
+
|
| 398 |
+
Note: For proprietary models, we calculate token density based on the image encoding charging strategy defined in the official API documentation, which provides an upper-bound estimation.
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
**Multi-image and Video Understanding:**
|
| 402 |
+
|
| 403 |
+
<details>
|
| 404 |
+
<summary>click to view</summary>
|
| 405 |
+
<div align=""center"">
|
| 406 |
+
|
| 407 |
+
<table style=""margin: 0px auto;"">
|
| 408 |
+
<thead>
|
| 409 |
+
<tr>
|
| 410 |
+
<th align=""left"">Model</th>
|
| 411 |
+
<th>Size</th>
|
| 412 |
+
<th>BLINK val</th>
|
| 413 |
+
<th>Mantis Eval</th>
|
| 414 |
+
<th>MIRB</th>
|
| 415 |
+
<th>Video-MME (wo / w subs)</th>
|
| 416 |
+
</tr>
|
| 417 |
+
</thead>
|
| 418 |
+
<tbody align=""center"">
|
| 419 |
+
<tr>
|
| 420 |
+
<td colspan=""6"" align=""left""><strong>Proprietary</strong></td>
|
| 421 |
+
</tr>
|
| 422 |
+
<tr>
|
| 423 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-20240513</td>
|
| 424 |
+
<td>-</td>
|
| 425 |
+
<td><strong>68.0</strong></td>
|
| 426 |
+
<td>-</td>
|
| 427 |
+
<td>-</td>
|
| 428 |
+
<td><strong>71.9/77.2<strong></td>
|
| 429 |
+
</tr>
|
| 430 |
+
<tr>
|
| 431 |
+
<td nowrap=""nowrap"" align=""left"">GPT4V</td>
|
| 432 |
+
<td>-</td>
|
| 433 |
+
<td>54.6</td>
|
| 434 |
+
<td>62.7</td>
|
| 435 |
+
<td>53.1</td>
|
| 436 |
+
<td>59.9/63.3</td>
|
| 437 |
+
</tr>
|
| 438 |
+
<tr>
|
| 439 |
+
<td colspan=""6"" align=""left""><strong>Open-source</strong></td>
|
| 440 |
+
</tr>
|
| 441 |
+
<tr>
|
| 442 |
+
<td nowrap=""nowrap"" align=""left"">LLaVA-NeXT-Interleave 14B</td>
|
| 443 |
+
<td>14B</td>
|
| 444 |
+
<td>52.6</td>
|
| 445 |
+
<td>66.4</td>
|
| 446 |
+
<td>30.2</td>
|
| 447 |
+
<td>-</td>
|
| 448 |
+
</tr>
|
| 449 |
+
<tr>
|
| 450 |
+
<td nowrap=""nowrap"" align=""left"">LLaVA-OneVision-72B</td>
|
| 451 |
+
<td>72B</td>
|
| 452 |
+
<td>55.4</td>
|
| 453 |
+
<td><strong>77.6</strong></td>
|
| 454 |
+
<td>-</td>
|
| 455 |
+
<td><u>66.2/69.5</u></td>
|
| 456 |
+
</tr>
|
| 457 |
+
<tr>
|
| 458 |
+
<td nowrap=""nowrap"" align=""left"">MANTIS 8B</td>
|
| 459 |
+
<td>8B</td>
|
| 460 |
+
<td>49.1</td>
|
| 461 |
+
<td>59.5</td>
|
| 462 |
+
<td>34.8</td>
|
| 463 |
+
<td>-</td>
|
| 464 |
+
</tr>
|
| 465 |
+
<tr>
|
| 466 |
+
<td nowrap=""nowrap"" align=""left"">Qwen2-VL-7B</td>
|
| 467 |
+
<td>8B</td>
|
| 468 |
+
<td>53.2</td>
|
| 469 |
+
<td>69.6*</td>
|
| 470 |
+
<td><strong>67.6*</strong></td>
|
| 471 |
+
<td>63.3/69.0</td>
|
| 472 |
+
</tr>
|
| 473 |
+
<tr>
|
| 474 |
+
<td nowrap=""nowrap"" align=""left"">InternVL2.5-8B</td>
|
| 475 |
+
<td>8B</td>
|
| 476 |
+
<td>54.8</td>
|
| 477 |
+
<td>67.7</td>
|
| 478 |
+
<td>52.5</td>
|
| 479 |
+
<td>64.2/66.9</td>
|
| 480 |
+
</tr>
|
| 481 |
+
<tr>
|
| 482 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-V 2.6</td>
|
| 483 |
+
<td>8B</td>
|
| 484 |
+
<td>53.0</td>
|
| 485 |
+
<td>69.1</td>
|
| 486 |
+
<td>53.8</td>
|
| 487 |
+
<td>60.9/63.6</td>
|
| 488 |
+
</tr>
|
| 489 |
+
<tr>
|
| 490 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 491 |
+
<td>8B</td>
|
| 492 |
+
<td><u>56.7</u></td>
|
| 493 |
+
<td><u>71.9</u></td>
|
| 494 |
+
<td><u>58.6</u></td>
|
| 495 |
+
<td>63.9/67.9</td>
|
| 496 |
+
</tr>
|
| 497 |
+
</tbody>
|
| 498 |
+
</table>
|
| 499 |
+
|
| 500 |
+
</div>
|
| 501 |
+
* We evaluate officially released checkpoints by ourselves.
|
| 502 |
+
|
| 503 |
+
</details>
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
#### Audio understanding and speech conversation results.
|
| 507 |
+
|
| 508 |
+
**Audio Understanding:**
|
| 509 |
+
|
| 510 |
+
<div align=""center"">
|
| 511 |
+
<table style=""margin: 0px auto;"">
|
| 512 |
+
<thead>
|
| 513 |
+
<tr>
|
| 514 |
+
<th align=""left"">Task</th>
|
| 515 |
+
<th>Size</th>
|
| 516 |
+
<th colspan=""3"">ASR (zh)</th>
|
| 517 |
+
<th colspan=""3"">ASR (en)</th>
|
| 518 |
+
<th colspan=""2"">AST</th>
|
| 519 |
+
<th>Emotion</th>
|
| 520 |
+
</tr>
|
| 521 |
+
<tr>
|
| 522 |
+
<th align=""left"">Metric</th>
|
| 523 |
+
<td></td>
|
| 524 |
+
<th colspan=""3"">CER↓</th>
|
| 525 |
+
<th colspan=""3"">WER↓</th>
|
| 526 |
+
<th colspan=""2"">BLEU↑</th>
|
| 527 |
+
<th>ACC↑</th>
|
| 528 |
+
</tr>
|
| 529 |
+
<tr>
|
| 530 |
+
<th align=""left"">Dataset</th>
|
| 531 |
+
<td></td>
|
| 532 |
+
<th>AISHELL-1</th>
|
| 533 |
+
<th>Fleurs zh</th>
|
| 534 |
+
<th>WenetSpeech test-net</th>
|
| 535 |
+
<th>LibriSpeech test-clean</th>
|
| 536 |
+
<th>GigaSpeech</th>
|
| 537 |
+
<th>TED-LIUM</th>
|
| 538 |
+
<th>CoVoST en2zh</th>
|
| 539 |
+
<th>CoVoST zh2en</th>
|
| 540 |
+
<th>MELD emotion</th>
|
| 541 |
+
</tr>
|
| 542 |
+
</thead>
|
| 543 |
+
<tbody align=""center"">
|
| 544 |
+
<tr>
|
| 545 |
+
<td colspan=""11"" align=""left""><strong>Proprietary</strong></td>
|
| 546 |
+
</tr>
|
| 547 |
+
<tr>
|
| 548 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-Realtime</td>
|
| 549 |
+
<td>-</td>
|
| 550 |
+
<td>7.3*</td>
|
| 551 |
+
<td><u>5.4*</u></td>
|
| 552 |
+
<td>28.9*</td>
|
| 553 |
+
<td>2.6*</td>
|
| 554 |
+
<td>12.9*</td>
|
| 555 |
+
<td>4.8*</td>
|
| 556 |
+
<td>37.1*</td>
|
| 557 |
+
<td>15.7*</td>
|
| 558 |
+
<td>33.2*</td>
|
| 559 |
+
</tr>
|
| 560 |
+
<tr>
|
| 561 |
+
<td nowrap=""nowrap"" align=""left"">Gemini 1.5 Pro</td>
|
| 562 |
+
<td>-</td>
|
| 563 |
+
<td>4.5*</td>
|
| 564 |
+
<td>5.9*</td>
|
| 565 |
+
<td>14.3*</td>
|
| 566 |
+
<td>2.9*</td>
|
| 567 |
+
<td>10.6*</td>
|
| 568 |
+
<td><strong>3.0*</strong></td>
|
| 569 |
+
<td><u>47.3*</u></td>
|
| 570 |
+
<td>22.6*</td>
|
| 571 |
+
<td>48.4*</td>
|
| 572 |
+
</tr>
|
| 573 |
+
<tr>
|
| 574 |
+
<td colspan=""11"" align=""left""><strong>Open-Source</strong></td>
|
| 575 |
+
</tr>
|
| 576 |
+
<tr>
|
| 577 |
+
<td nowrap=""nowrap"" align=""left"">Qwen2-Audio-7B</td>
|
| 578 |
+
<td>8B</td>
|
| 579 |
+
<td>-</td>
|
| 580 |
+
<td>7.5</td>
|
| 581 |
+
<td>-</td>
|
| 582 |
+
<td><strong>1.6</strong></td>
|
| 583 |
+
<td>-</td>
|
| 584 |
+
<td>-</td>
|
| 585 |
+
<td>45.2</td>
|
| 586 |
+
<td><u>24.4</u></td>
|
| 587 |
+
<td><strong>55.3</strong></td>
|
| 588 |
+
</tr>
|
| 589 |
+
<tr>
|
| 590 |
+
<td nowrap=""nowrap"" align=""left"">Qwen2-Audio-7B-Instruct</td>
|
| 591 |
+
<td>8B</td>
|
| 592 |
+
<td>2.6*</td>
|
| 593 |
+
<td>6.9*</td>
|
| 594 |
+
<td><u>10.3*</u></td>
|
| 595 |
+
<td>3.1*</td>
|
| 596 |
+
<td><u>9.7</u>*</td>
|
| 597 |
+
<td>5.9*</td>
|
| 598 |
+
<td>39.5*</td>
|
| 599 |
+
<td>22.9*</td>
|
| 600 |
+
<td>17.4*</td>
|
| 601 |
+
</tr>
|
| 602 |
+
<tr>
|
| 603 |
+
<td nowrap=""nowrap"" align=""left"">GLM-4-Voice-Base</td>
|
| 604 |
+
<td>9B</td>
|
| 605 |
+
<td><u>2.5</u></td>
|
| 606 |
+
<td>-</td>
|
| 607 |
+
<td>-</td>
|
| 608 |
+
<td>2.8</td>
|
| 609 |
+
<td>-</td>
|
| 610 |
+
<td>-</td>
|
| 611 |
+
<td>-</td>
|
| 612 |
+
<td>-</td>
|
| 613 |
+
</tr>
|
| 614 |
+
<tr>
|
| 615 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 616 |
+
<td>8B</td>
|
| 617 |
+
<td><strong>1.6</strong></td>
|
| 618 |
+
<td><strong>4.4</strong></td>
|
| 619 |
+
<td><strong>6.9</strong></td>
|
| 620 |
+
<td><u>1.7</u></td>
|
| 621 |
+
<td><strong>8.7</strong></td>
|
| 622 |
+
<td><strong>3.0</strong></td>
|
| 623 |
+
<td><strong>48.2</strong></td>
|
| 624 |
+
<td><strong>27.2</strong></td>
|
| 625 |
+
<td><u>52.4</u></td>
|
| 626 |
+
</tr>
|
| 627 |
+
</tbody>
|
| 628 |
+
</table>
|
| 629 |
+
</div>
|
| 630 |
+
* We evaluate officially released checkpoints by ourselves.<br><br>
|
| 631 |
+
|
| 632 |
+
**Speech Generation:**
|
| 633 |
+
|
| 634 |
+
<div align=""center"">
|
| 635 |
+
<table style=""margin: 0px auto;"">
|
| 636 |
+
<thead>
|
| 637 |
+
<tr>
|
| 638 |
+
<th align=""left"">Task</th>
|
| 639 |
+
<th>Size</th>
|
| 640 |
+
<th colspan=""9"">SpeechQA</th>
|
| 641 |
+
</tr>
|
| 642 |
+
<tr>
|
| 643 |
+
<th align=""left"">Metric</th>
|
| 644 |
+
<th></th>
|
| 645 |
+
<th colspan=""3"">ACC↑</th>
|
| 646 |
+
<th>G-Eval (10 point)↑</th>
|
| 647 |
+
<th>Semantic ELO score↑</th>
|
| 648 |
+
<th>Acoustic ELO score↑</th>
|
| 649 |
+
<th>Overall ELO score↑</th>
|
| 650 |
+
<th>UTMOS↑</th>
|
| 651 |
+
<th>ASR-WER↓</th>
|
| 652 |
+
</tr>
|
| 653 |
+
<tr>
|
| 654 |
+
<th align=""left"">Dataset</th>
|
| 655 |
+
<th></th>
|
| 656 |
+
<th>Speech Llama Q.</th>
|
| 657 |
+
<th>Speech Web Q.</th>
|
| 658 |
+
<th>Speech Trivia QA</th>
|
| 659 |
+
<th>Speech AlpacaEval</th>
|
| 660 |
+
<th colspan=""5"">AudioArena</th>
|
| 661 |
+
</tr>
|
| 662 |
+
</thead>
|
| 663 |
+
<tbody align=""center"">
|
| 664 |
+
<tr>
|
| 665 |
+
<td colspan=""11"" align=""left""><strong>Proprietary</strong></td>
|
| 666 |
+
</tr>
|
| 667 |
+
<tr>
|
| 668 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-Realtime</td>
|
| 669 |
+
<td></td>
|
| 670 |
+
<td><strong>71.7</strong></td>
|
| 671 |
+
<td><strong>51.6</strong></td>
|
| 672 |
+
<td><strong>69.7</strong></td>
|
| 673 |
+
<td><strong>7.4</strong></td>
|
| 674 |
+
<td><strong>1157</strong></td>
|
| 675 |
+
<td><strong>1203</strong></td>
|
| 676 |
+
<td><strong>1200</strong></td>
|
| 677 |
+
<td><strong>4.2</strong></td>
|
| 678 |
+
<td><strong>2.3</strong></td>
|
| 679 |
+
</tr>
|
| 680 |
+
<tr>
|
| 681 |
+
<td colspan=""11"" align=""left""><strong>Open-Source</strong></td>
|
| 682 |
+
</tr>
|
| 683 |
+
<tr>
|
| 684 |
+
<td nowrap=""nowrap"" align=""left"">GLM-4-Voice</td>
|
| 685 |
+
<td>9B</td>
|
| 686 |
+
<td>50.0</td>
|
| 687 |
+
<td>32.0</td>
|
| 688 |
+
<td>36.4</td>
|
| 689 |
+
<td><u>5.1</u></td>
|
| 690 |
+
<td>999</td>
|
| 691 |
+
<td>1147</td>
|
| 692 |
+
<td>1035</td>
|
| 693 |
+
<td><u>4.1</u></td>
|
| 694 |
+
<td><u>11.7</u></td>
|
| 695 |
+
</tr>
|
| 696 |
+
<tr>
|
| 697 |
+
<td nowrap=""nowrap"" align=""left"">Llama-Omni</td>
|
| 698 |
+
<td>8B</td>
|
| 699 |
+
<td>45.3</td>
|
| 700 |
+
<td>22.9</td>
|
| 701 |
+
<td>10.7</td>
|
| 702 |
+
<td>3.9</td>
|
| 703 |
+
<td>960</td>
|
| 704 |
+
<td>878</td>
|
| 705 |
+
<td>897</td>
|
| 706 |
+
<td>3.2</td>
|
| 707 |
+
<td>24.3</td>
|
| 708 |
+
</tr>
|
| 709 |
+
<tr>
|
| 710 |
+
<td nowrap=""nowrap"" align=""left"">Moshi</td>
|
| 711 |
+
<td>7B</td>
|
| 712 |
+
<td>43.7</td>
|
| 713 |
+
<td>23.8</td>
|
| 714 |
+
<td>16.7</td>
|
| 715 |
+
<td>2.4</td>
|
| 716 |
+
<td>871</td>
|
| 717 |
+
<td>808</td>
|
| 718 |
+
<td>875</td>
|
| 719 |
+
<td>2.8</td>
|
| 720 |
+
<td>8.2</td>
|
| 721 |
+
</tr>
|
| 722 |
+
<tr>
|
| 723 |
+
<td nowrap=""nowrap"" align=""left"">Mini-Omni</td>
|
| 724 |
+
<td>1B</td>
|
| 725 |
+
<td>22.0</td>
|
| 726 |
+
<td>12.8</td>
|
| 727 |
+
<td>6.9</td>
|
| 728 |
+
<td>2.5</td>
|
| 729 |
+
<td>926</td>
|
| 730 |
+
<td>803</td>
|
| 731 |
+
<td>865</td>
|
| 732 |
+
<td>3.4</td>
|
| 733 |
+
<td>10.0</td>
|
| 734 |
+
</tr>
|
| 735 |
+
<tr>
|
| 736 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 737 |
+
<td>8B</td>
|
| 738 |
+
<td><u>61.0</u></td>
|
| 739 |
+
<td><u>40.0</u></td>
|
| 740 |
+
<td><u>40.2</u></td>
|
| 741 |
+
<td><u>5.1</u></td>
|
| 742 |
+
<td><u>1088</u></td>
|
| 743 |
+
<td><u>1163</u></td>
|
| 744 |
+
<td><u>1131</u></td>
|
| 745 |
+
<td><strong>4.2</strong></td>
|
| 746 |
+
<td>9.8</td>
|
| 747 |
+
</tr>
|
| 748 |
+
</tbody>
|
| 749 |
+
</table>
|
| 750 |
+
</div>
|
| 751 |
+
All results are from AudioEvals, and the evaluation methods along with further details can be found in <a href=""https://github.com/OpenBMB/UltraEval-Audio"" target=""_blank"">UltraEval-Audio</a>.<br><br>
|
| 752 |
+
|
| 753 |
+
**End-to-end Voice Cloning**
|
| 754 |
+
|
| 755 |
+
<div align=""center"">
|
| 756 |
+
<table style=""margin: 0px auto;"">
|
| 757 |
+
<thead>
|
| 758 |
+
<tr>
|
| 759 |
+
<th align=""left"">Task</th>
|
| 760 |
+
<th colspan=""2"">Voice cloning</th>
|
| 761 |
+
</tr>
|
| 762 |
+
<tr>
|
| 763 |
+
<th align=""left"">Metric</th>
|
| 764 |
+
<th>SIMO↑</th>
|
| 765 |
+
<th>SIMO↑</th>
|
| 766 |
+
</tr>
|
| 767 |
+
<tr>
|
| 768 |
+
<th align=""left"">Dataset</th>
|
| 769 |
+
<th>Seed-TTS test-zh</th>
|
| 770 |
+
<th>Seed-TTS test-en</th>
|
| 771 |
+
</tr>
|
| 772 |
+
</thead>
|
| 773 |
+
<tbody align=""center"">
|
| 774 |
+
<tr>
|
| 775 |
+
<td nowrap=""nowrap"" align=""left"">F5-TTS</td>
|
| 776 |
+
<td><strong>76</strong></td>
|
| 777 |
+
<td><strong>67</strong></td>
|
| 778 |
+
</tr>
|
| 779 |
+
<tr>
|
| 780 |
+
<td nowrap=""nowrap"" align=""left"">CosyVoice</td>
|
| 781 |
+
<td><u>75</u></td>
|
| 782 |
+
<td><u>64</u></td>
|
| 783 |
+
</tr>
|
| 784 |
+
<tr>
|
| 785 |
+
<td nowrap=""nowrap"" align=""left"">FireRedTTS</td>
|
| 786 |
+
<td>63</td>
|
| 787 |
+
<td>46</td>
|
| 788 |
+
</tr>
|
| 789 |
+
<tr>
|
| 790 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 791 |
+
<td>57</td>
|
| 792 |
+
<td>47</td>
|
| 793 |
+
</tr>
|
| 794 |
+
</tbody>
|
| 795 |
+
</table>
|
| 796 |
+
</div>
|
| 797 |
+
|
| 798 |
+
|
| 799 |
+
#### Multimodal live streaming results.
|
| 800 |
+
|
| 801 |
+
**Multimodal Live Streaming:** results on StreamingBench
|
| 802 |
+
|
| 803 |
+
<table style=""margin: 0px auto;"">
|
| 804 |
+
<thead>
|
| 805 |
+
<tr>
|
| 806 |
+
<th align=""left"">Model</th>
|
| 807 |
+
<th>Size</th>
|
| 808 |
+
<th>Real-Time Video Understanding</th>
|
| 809 |
+
<th>Omni-Source Understanding</th>
|
| 810 |
+
<th>Contextual Understanding</th>
|
| 811 |
+
<th>Overall</th>
|
| 812 |
+
</tr>
|
| 813 |
+
</thead>
|
| 814 |
+
<tbody align=""center"">
|
| 815 |
+
<tr>
|
| 816 |
+
<td colspan=""7"" align=""left""><strong>Proprietary</strong></td>
|
| 817 |
+
</tr>
|
| 818 |
+
<tr>
|
| 819 |
+
<td nowrap=""nowrap"" align=""left"">Gemini 1.5 Pro</td>
|
| 820 |
+
<td>-</td>
|
| 821 |
+
<td><u>77.4</u></td>
|
| 822 |
+
<td><strong>67.8</strong></td>
|
| 823 |
+
<td><strong>51.1</strong></td>
|
| 824 |
+
<td><strong>70.3</strong></td>
|
| 825 |
+
</tr>
|
| 826 |
+
<tr>
|
| 827 |
+
<td nowrap=""nowrap"" align=""left"">GPT-4o-202408</td>
|
| 828 |
+
<td>-</td>
|
| 829 |
+
<td>74.5</td>
|
| 830 |
+
<td>51.0</td>
|
| 831 |
+
<td><u>48.0</u></td>
|
| 832 |
+
<td>64.1</td>
|
| 833 |
+
</tr>
|
| 834 |
+
<tr>
|
| 835 |
+
<td nowrap=""nowrap"" align=""left"">Claude-3.5-Sonnet</td>
|
| 836 |
+
<td>-</td>
|
| 837 |
+
<td>74.0</td>
|
| 838 |
+
<td>41.4</td>
|
| 839 |
+
<td>37.8</td>
|
| 840 |
+
<td>59.7</td>
|
| 841 |
+
</tr>
|
| 842 |
+
<tr>
|
| 843 |
+
<td colspan=""9"" align=""left""><strong>Open-source</strong></td>
|
| 844 |
+
</tr>
|
| 845 |
+
<tr>
|
| 846 |
+
<td nowrap=""nowrap"" align=""left"">VILA-1.5</td>
|
| 847 |
+
<td>8B</td>
|
| 848 |
+
<td>61.5</td>
|
| 849 |
+
<td>37.5</td>
|
| 850 |
+
<td>26.7</td>
|
| 851 |
+
<td>49.5</td>
|
| 852 |
+
</tr>
|
| 853 |
+
<tr>
|
| 854 |
+
<td nowrap=""nowrap"" align=""left"">LongVA</td>
|
| 855 |
+
<td>7B</td>
|
| 856 |
+
<td>63.1</td>
|
| 857 |
+
<td>35.9</td>
|
| 858 |
+
<td>30.2</td>
|
| 859 |
+
<td>50.7</td>
|
| 860 |
+
</tr>
|
| 861 |
+
<tr>
|
| 862 |
+
<td nowrap=""nowrap"" align=""left"">LLaVA-Next-Video-34B</td>
|
| 863 |
+
<td>34B</td>
|
| 864 |
+
<td>69.8</td>
|
| 865 |
+
<td>41.7</td>
|
| 866 |
+
<td>34.3</td>
|
| 867 |
+
<td>56.7</td>
|
| 868 |
+
</tr>
|
| 869 |
+
<tr>
|
| 870 |
+
<td nowrap=""nowrap"" align=""left"">Qwen2-VL-7B</td>
|
| 871 |
+
<td>8B</td>
|
| 872 |
+
<td>71.2</td>
|
| 873 |
+
<td>40.7</td>
|
| 874 |
+
<td>33.1</td>
|
| 875 |
+
<td>57.0</td>
|
| 876 |
+
</tr>
|
| 877 |
+
<tr>
|
| 878 |
+
<td nowrap=""nowrap"" align=""left"">InternVL2-8B</td>
|
| 879 |
+
<td>8B</td>
|
| 880 |
+
<td>70.1</td>
|
| 881 |
+
<td>42.7</td>
|
| 882 |
+
<td>34.1</td>
|
| 883 |
+
<td>57.0</td>
|
| 884 |
+
</tr>
|
| 885 |
+
<tr>
|
| 886 |
+
<td nowrap=""nowrap"" align=""left"">VITA-1.5</td>
|
| 887 |
+
<td>8B</td>
|
| 888 |
+
<td>70.9</td>
|
| 889 |
+
<td>40.8</td>
|
| 890 |
+
<td>35.8</td>
|
| 891 |
+
<td>57.4</td>
|
| 892 |
+
</tr>
|
| 893 |
+
<tr>
|
| 894 |
+
<td nowrap=""nowrap"" align=""left"">LLaVA-OneVision-7B</td>
|
| 895 |
+
<td>8B</td>
|
| 896 |
+
<td>74.3</td>
|
| 897 |
+
<td>40.8</td>
|
| 898 |
+
<td>31.0</td>
|
| 899 |
+
<td>58.4</td>
|
| 900 |
+
</tr>
|
| 901 |
+
<tr>
|
| 902 |
+
<td nowrap=""nowrap"" align=""left"">InternLM-XC2.5-OL-7B</td>
|
| 903 |
+
<td>8B</td>
|
| 904 |
+
<td>75.4</td>
|
| 905 |
+
<td>46.2</td>
|
| 906 |
+
<td>33.6</td>
|
| 907 |
+
<td>60.8</td>
|
| 908 |
+
</tr>
|
| 909 |
+
<tr>
|
| 910 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-V 2.6</td>
|
| 911 |
+
<td>8B</td>
|
| 912 |
+
<td>72.4</td>
|
| 913 |
+
<td>40.2</td>
|
| 914 |
+
<td>33.4</td>
|
| 915 |
+
<td>57.7</td>
|
| 916 |
+
</tr>
|
| 917 |
+
<tr>
|
| 918 |
+
<td nowrap=""nowrap"" align=""left"">MiniCPM-o 2.6</td>
|
| 919 |
+
<td>8B</td>
|
| 920 |
+
<td><strong>79.9</strong></td>
|
| 921 |
+
<td><u>53.4</u></td>
|
| 922 |
+
<td>38.5</td>
|
| 923 |
+
<td><u>66.0</u></td>
|
| 924 |
+
</tr>
|
| 925 |
+
</tbody>
|
| 926 |
+
</table>
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
### Examples <!-- omit in toc -->
|
| 931 |
+
|
| 932 |
+
We deploy MiniCPM-o 2.6 on end devices. The demo video is the raw-speed recording on an iPad Pro and a Web demo.
|
| 933 |
+
|
| 934 |
+
<div align=""center"">
|
| 935 |
+
<a href=""https://youtu.be/JFJg9KZ_iZk""><img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/o-2dot6-demo-video-preview.png"", width=70%></a>
|
| 936 |
+
</div>
|
| 937 |
+
|
| 938 |
+
<br>
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
<div style=""display: flex; flex-direction: column; align-items: center;"">
|
| 942 |
+
<img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/minicpmo2_6/minicpmo2_6_math_intersect.png"" alt=""math"" style=""margin-bottom: 5px;"">
|
| 943 |
+
<img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/minicpmo2_6/minicpmo2_6_diagram_train_NN.png"" alt=""diagram"" style=""margin-bottom: 5px;"">
|
| 944 |
+
<img src=""https://github.com/OpenBMB/MiniCPM-o/raw/main/assets/minicpmo2_6/minicpmo2_6_multi-image_bike.png"" alt=""bike"" style=""margin-bottom: 5px;"">
|
| 945 |
+
</div>
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
## Online Demo
|
| 951 |
+
Click here to try the online demo of [MiniCPM-o 2.6](https://minicpm-omni-webdemo-us.modelbest.cn).
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
## Usage
|
| 955 |
+
Inference using Huggingface transformers on NVIDIA GPUs. Please ensure that `transformers==4.44.2` is installed, as other versions may have compatibility issues. We are investigating this issue. Requirements tested on python 3.10:
|
| 956 |
+
```
|
| 957 |
+
Pillow==10.1.0
|
| 958 |
+
torch==2.3.1
|
| 959 |
+
torchaudio==2.3.1
|
| 960 |
+
torchvision==0.18.1
|
| 961 |
+
transformers==4.44.2
|
| 962 |
+
librosa==0.9.0
|
| 963 |
+
soundfile==0.12.1
|
| 964 |
+
vector-quantize-pytorch==1.18.5
|
| 965 |
+
vocos==0.1.0
|
| 966 |
+
decord
|
| 967 |
+
moviepy
|
| 968 |
+
```
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
### Model initialization
|
| 972 |
+
```python
|
| 973 |
+
|
| 974 |
+
import torch
|
| 975 |
+
from PIL import Image
|
| 976 |
+
from transformers import AutoModel, AutoTokenizer
|
| 977 |
+
|
| 978 |
+
# load omni model default, the default init_vision/init_audio/init_tts is True
|
| 979 |
+
# if load vision-only model, please set init_audio=False and init_tts=False
|
| 980 |
+
# if load audio-only model, please set init_vision=False
|
| 981 |
+
model = AutoModel.from_pretrained(
|
| 982 |
+
'openbmb/MiniCPM-o-2_6',
|
| 983 |
+
trust_remote_code=True,
|
| 984 |
+
attn_implementation='sdpa', # sdpa or flash_attention_2
|
| 985 |
+
torch_dtype=torch.bfloat16,
|
| 986 |
+
init_vision=True,
|
| 987 |
+
init_audio=True,
|
| 988 |
+
init_tts=True
|
| 989 |
+
)
|
| 990 |
+
|
| 991 |
+
|
| 992 |
+
model = model.eval().cuda()
|
| 993 |
+
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-o-2_6', trust_remote_code=True)
|
| 994 |
+
|
| 995 |
+
# In addition to vision-only mode, tts processor and vocos also needs to be initialized
|
| 996 |
+
model.init_tts()
|
| 997 |
+
```
|
| 998 |
+
|
| 999 |
+
If you are using an older version of PyTorch, you might encounter this issue `""weight_norm_fwd_first_dim_kernel"" not implemented for 'BFloat16'`, Please convert the TTS to float32 type.
|
| 1000 |
+
```python
|
| 1001 |
+
model.tts.float()
|
| 1002 |
+
```
|
| 1003 |
+
|
| 1004 |
+
### Omni mode
|
| 1005 |
+
We provide two inference modes: chat and streaming
|
| 1006 |
+
|
| 1007 |
+
#### Chat inference
|
| 1008 |
+
```python
|
| 1009 |
+
import math
|
| 1010 |
+
import numpy as np
|
| 1011 |
+
from PIL import Image
|
| 1012 |
+
from moviepy.editor import VideoFileClip
|
| 1013 |
+
import tempfile
|
| 1014 |
+
import librosa
|
| 1015 |
+
import soundfile as sf
|
| 1016 |
+
|
| 1017 |
+
def get_video_chunk_content(video_path, flatten=True):
|
| 1018 |
+
video = VideoFileClip(video_path)
|
| 1019 |
+
print('video_duration:', video.duration)
|
| 1020 |
+
|
| 1021 |
+
with tempfile.NamedTemporaryFile(suffix="".wav"", delete=True) as temp_audio_file:
|
| 1022 |
+
temp_audio_file_path = temp_audio_file.name
|
| 1023 |
+
video.audio.write_audiofile(temp_audio_file_path, codec=""pcm_s16le"", fps=16000)
|
| 1024 |
+
audio_np, sr = librosa.load(temp_audio_file_path, sr=16000, mono=True)
|
| 1025 |
+
num_units = math.ceil(video.duration)
|
| 1026 |
+
|
| 1027 |
+
# 1 frame + 1s audio chunk
|
| 1028 |
+
contents= []
|
| 1029 |
+
for i in range(num_units):
|
| 1030 |
+
frame = video.get_frame(i+1)
|
| 1031 |
+
image = Image.fromarray((frame).astype(np.uint8))
|
| 1032 |
+
audio = audio_np[sr*i:sr*(i+1)]
|
| 1033 |
+
if flatten:
|
| 1034 |
+
contents.extend([""<unit>"", image, audio])
|
| 1035 |
+
else:
|
| 1036 |
+
contents.append([""<unit>"", image, audio])
|
| 1037 |
+
|
| 1038 |
+
return contents
|
| 1039 |
+
|
| 1040 |
+
video_path=""assets/Skiing.mp4""
|
| 1041 |
+
# if use voice clone prompt, please set ref_audio
|
| 1042 |
+
ref_audio_path = 'assets/demo.wav'
|
| 1043 |
+
ref_audio, _ = librosa.load(ref_audio_path, sr=16000, mono=True)
|
| 1044 |
+
sys_msg = model.get_sys_prompt(ref_audio=ref_audio, mode='omni', language='en')
|
| 1045 |
+
# or use default prompt
|
| 1046 |
+
# sys_msg = model.get_sys_prompt(mode='omni', language='en')
|
| 1047 |
+
|
| 1048 |
+
contents = get_video_chunk_content(video_path)
|
| 1049 |
+
msg = {""role"":""user"", ""content"": contents}
|
| 1050 |
+
msgs = [sys_msg, msg]
|
| 1051 |
+
|
| 1052 |
+
# please set generate_audio=True and output_audio_path to save the tts result
|
| 1053 |
+
generate_audio = True
|
| 1054 |
+
output_audio_path = 'output.wav'
|
| 1055 |
+
|
| 1056 |
+
res = model.chat(
|
| 1057 |
+
msgs=msgs,
|
| 1058 |
+
tokenizer=tokenizer,
|
| 1059 |
+
sampling=True,
|
| 1060 |
+
temperature=0.5,
|
| 1061 |
+
max_new_tokens=4096,
|
| 1062 |
+
omni_input=True, # please set omni_input=True when omni inference
|
| 1063 |
+
use_tts_template=True,
|
| 1064 |
+
generate_audio=generate_audio,
|
| 1065 |
+
output_audio_path=output_audio_path,
|
| 1066 |
+
max_slice_nums=1,
|
| 1067 |
+
use_image_id=False,
|
| 1068 |
+
return_dict=True
|
| 1069 |
+
)
|
| 1070 |
+
print(res)
|
| 1071 |
+
|
| 1072 |
+
## You will get the answer: The person in the picture is skiing down a snowy slope.
|
| 1073 |
+
# import IPython
|
| 1074 |
+
# IPython.display.Audio('output.wav')
|
| 1075 |
+
|
| 1076 |
+
```
|
| 1077 |
+
#### Streaming inference
|
| 1078 |
+
```python
|
| 1079 |
+
# a new conversation need reset session first, it will reset the kv-cache
|
| 1080 |
+
model.reset_session()
|
| 1081 |
+
|
| 1082 |
+
contents = get_video_chunk_content(video_path, flatten=False)
|
| 1083 |
+
session_id = '123'
|
| 1084 |
+
generate_audio = True
|
| 1085 |
+
|
| 1086 |
+
# 1. prefill system prompt
|
| 1087 |
+
res = model.streaming_prefill(
|
| 1088 |
+
session_id=session_id,
|
| 1089 |
+
msgs=[sys_msg],
|
| 1090 |
+
tokenizer=tokenizer
|
| 1091 |
+
)
|
| 1092 |
+
|
| 1093 |
+
# 2. prefill video/audio chunks
|
| 1094 |
+
for content in contents:
|
| 1095 |
+
msgs = [{""role"":""user"", ""content"": content}]
|
| 1096 |
+
res = model.streaming_prefill(
|
| 1097 |
+
session_id=session_id,
|
| 1098 |
+
msgs=msgs,
|
| 1099 |
+
tokenizer=tokenizer
|
| 1100 |
+
)
|
| 1101 |
+
|
| 1102 |
+
# 3. generate
|
| 1103 |
+
res = model.streaming_generate(
|
| 1104 |
+
session_id=session_id,
|
| 1105 |
+
tokenizer=tokenizer,
|
| 1106 |
+
temperature=0.5,
|
| 1107 |
+
generate_audio=generate_audio
|
| 1108 |
+
)
|
| 1109 |
+
|
| 1110 |
+
audios = []
|
| 1111 |
+
text = """"
|
| 1112 |
+
|
| 1113 |
+
if generate_audio:
|
| 1114 |
+
for r in res:
|
| 1115 |
+
audio_wav = r.audio_wav
|
| 1116 |
+
sampling_rate = r.sampling_rate
|
| 1117 |
+
txt = r.text
|
| 1118 |
+
|
| 1119 |
+
audios.append(audio_wav)
|
| 1120 |
+
text += txt
|
| 1121 |
+
|
| 1122 |
+
res = np.concatenate(audios)
|
| 1123 |
+
sf.write(""output.wav"", res, samplerate=sampling_rate)
|
| 1124 |
+
print(""text:"", text)
|
| 1125 |
+
print(""audio saved to output.wav"")
|
| 1126 |
+
else:
|
| 1127 |
+
for r in res:
|
| 1128 |
+
text += r['text']
|
| 1129 |
+
print(""text:"", text)
|
| 1130 |
+
|
| 1131 |
+
```
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
### Speech and Audio Mode
|
| 1135 |
+
|
| 1136 |
+
Model initialization
|
| 1137 |
+
|
| 1138 |
+
```python
|
| 1139 |
+
import torch
|
| 1140 |
+
import librosa
|
| 1141 |
+
from transformers import AutoModel, AutoTokenizer
|
| 1142 |
+
|
| 1143 |
+
model = AutoModel.from_pretrained('openbmb/MiniCPM-o-2_6', trust_remote_code=True,
|
| 1144 |
+
attn_implementation='sdpa', torch_dtype=torch.bfloat16) # sdpa or flash_attention_2, no eager
|
| 1145 |
+
model = model.eval().cuda()
|
| 1146 |
+
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-o-2_6', trust_remote_code=True)
|
| 1147 |
+
|
| 1148 |
+
model.init_tts()
|
| 1149 |
+
model.tts.float()
|
| 1150 |
+
```
|
| 1151 |
+
|
| 1152 |
+
<hr/>
|
| 1153 |
+
|
| 1154 |
+
#### Mimick
|
| 1155 |
+
|
| 1156 |
+
`Mimick` task reflects a model's end-to-end speech modeling capability. The model takes audio input, and outputs an ASR transcription and subsequently reconstructs the original audio with high similarity. The higher the similarity between the reconstructed audio and the original audio, the stronger the model's foundational capability in end-to-end speech modeling.
|
| 1157 |
+
|
| 1158 |
+
```python
|
| 1159 |
+
mimick_prompt = ""Please repeat each user's speech, including voice style and speech content.""
|
| 1160 |
+
audio_input, _ = librosa.load('./assets/input_examples/Trump_WEF_2018_10s.mp3', sr=16000, mono=True) # load the audio to be mimicked
|
| 1161 |
+
|
| 1162 |
+
# can also try `./assets/input_examples/cxk_original.wav`,
|
| 1163 |
+
# `./assets/input_examples/fast-pace.wav`,
|
| 1164 |
+
# `./assets/input_examples/chi-english-1.wav`
|
| 1165 |
+
# `./assets/input_examples/exciting-emotion.wav`
|
| 1166 |
+
# for different aspects of speech-centric features.
|
| 1167 |
+
|
| 1168 |
+
msgs = [{'role': 'user', 'content': [mimick_prompt, audio_input]}]
|
| 1169 |
+
res = model.chat(
|
| 1170 |
+
msgs=msgs,
|
| 1171 |
+
tokenizer=tokenizer,
|
| 1172 |
+
sampling=True,
|
| 1173 |
+
max_new_tokens=128,
|
| 1174 |
+
use_tts_template=True,
|
| 1175 |
+
temperature=0.3,
|
| 1176 |
+
generate_audio=True,
|
| 1177 |
+
output_audio_path='output_mimick.wav', # save the tts result to output_audio_path
|
| 1178 |
+
)
|
| 1179 |
+
```
|
| 1180 |
+
|
| 1181 |
+
<hr/>
|
| 1182 |
+
|
| 1183 |
+
#### General Speech Conversation with Configurable Voices
|
| 1184 |
+
|
| 1185 |
+
A general usage scenario of `MiniCPM-o-2.6` is role-playing a specific character based on the audio prompt. It will mimic the voice of the character to some extent and act like the character in text, including language style. In this mode, `MiniCPM-o-2.6` sounds **more natural and human-like**. Self-defined audio prompts can be used to customize the voice of the character in an end-to-end manner.
|
| 1186 |
+
|
| 1187 |
+
|
| 1188 |
+
```python
|
| 1189 |
+
ref_audio, _ = librosa.load('./assets/input_examples/icl_20.wav', sr=16000, mono=True) # load the reference audio
|
| 1190 |
+
sys_prompt = model.get_sys_prompt(ref_audio=ref_audio, mode='audio_roleplay', language='en')
|
| 1191 |
+
|
| 1192 |
+
# round one
|
| 1193 |
+
user_question = {'role': 'user', 'content': [librosa.load('xxx.wav', sr=16000, mono=True)[0]]}
|
| 1194 |
+
msgs = [sys_prompt, user_question]
|
| 1195 |
+
res = model.chat(
|
| 1196 |
+
msgs=msgs,
|
| 1197 |
+
tokenizer=tokenizer,
|
| 1198 |
+
sampling=True,
|
| 1199 |
+
max_new_tokens=128,
|
| 1200 |
+
use_tts_template=True,
|
| 1201 |
+
generate_audio=True,
|
| 1202 |
+
temperature=0.3,
|
| 1203 |
+
output_audio_path='result_roleplay_round_1.wav',
|
| 1204 |
+
)
|
| 1205 |
+
|
| 1206 |
+
# round two
|
| 1207 |
+
history = msgs.append({'role': 'assistant', 'content': res})
|
| 1208 |
+
user_question = {'role': 'user', 'content': [librosa.load('xxx.wav', sr=16000, mono=True)[0]]}
|
| 1209 |
+
msgs = history.append(user_question)
|
| 1210 |
+
res = model.chat(
|
| 1211 |
+
msgs=msgs,
|
| 1212 |
+
tokenizer=tokenizer,
|
| 1213 |
+
sampling=True,
|
| 1214 |
+
max_new_tokens=128,
|
| 1215 |
+
use_tts_template=True,
|
| 1216 |
+
generate_audio=True,
|
| 1217 |
+
temperature=0.3,
|
| 1218 |
+
output_audio_path='result_roleplay_round_2.wav',
|
| 1219 |
+
)
|
| 1220 |
+
print(res)
|
| 1221 |
+
```
|
| 1222 |
+
|
| 1223 |
+
<hr/>
|
| 1224 |
+
|
| 1225 |
+
#### Speech Conversation as an AI Assistant
|
| 1226 |
+
|
| 1227 |
+
An enhanced feature of `MiniCPM-o-2.6` is to act as an AI assistant, but only with limited choice of voices. In this mode, `MiniCPM-o-2.6` is **less human-like and more like a voice assistant**. In this mode, the model is more instruction-following. For demo, you are suggested to use `assistant_female_voice`, `assistant_male_voice`, and `assistant_default_female_voice`. Other voices may work but not as stable as the default voices.
|
| 1228 |
+
|
| 1229 |
+
*Please note that, `assistant_female_voice` and `assistant_male_voice` are more stable but sounds like robots, while `assistant_default_female_voice` is more human-alike but not stable, its voice often changes in multiple turns. We suggest you to try stable voices `assistant_female_voice` and `assistant_male_voice`.*
|
| 1230 |
+
|
| 1231 |
+
```python
|
| 1232 |
+
ref_audio, _ = librosa.load('./assets/input_examples/assistant_female_voice.wav', sr=16000, mono=True) # or use `./assets/input_examples/assistant_male_voice.wav`
|
| 1233 |
+
sys_prompt = model.get_sys_prompt(ref_audio=ref_audio, mode='audio_assistant', language='en')
|
| 1234 |
+
user_question = {'role': 'user', 'content': [librosa.load('xxx.wav', sr=16000, mono=True)[0]]} # load the user's audio question
|
| 1235 |
+
|
| 1236 |
+
# round one
|
| 1237 |
+
msgs = [sys_prompt, user_question]
|
| 1238 |
+
res = model.chat(
|
| 1239 |
+
msgs=msgs,
|
| 1240 |
+
tokenizer=tokenizer,
|
| 1241 |
+
sampling=True,
|
| 1242 |
+
max_new_tokens=128,
|
| 1243 |
+
use_tts_template=True,
|
| 1244 |
+
generate_audio=True,
|
| 1245 |
+
temperature=0.3,
|
| 1246 |
+
output_audio_path='result_assistant_round_1.wav',
|
| 1247 |
+
)
|
| 1248 |
+
|
| 1249 |
+
# round two
|
| 1250 |
+
history = msgs.append({'role': 'assistant', 'content': res})
|
| 1251 |
+
user_question = {'role': 'user', 'content': [librosa.load('xxx.wav', sr=16000, mono=True)[0]]}
|
| 1252 |
+
msgs = history.append(user_question)
|
| 1253 |
+
res = model.chat(
|
| 1254 |
+
msgs=msgs,
|
| 1255 |
+
tokenizer=tokenizer,
|
| 1256 |
+
sampling=True,
|
| 1257 |
+
max_new_tokens=128,
|
| 1258 |
+
use_tts_template=True,
|
| 1259 |
+
generate_audio=True,
|
| 1260 |
+
temperature=0.3,
|
| 1261 |
+
output_audio_path='result_assistant_round_2.wav',
|
| 1262 |
+
)
|
| 1263 |
+
print(res)
|
| 1264 |
+
```
|
| 1265 |
+
|
| 1266 |
+
<hr/>
|
| 1267 |
+
|
| 1268 |
+
#### Instruction-to-Speech
|
| 1269 |
+
|
| 1270 |
+
`MiniCPM-o-2.6` can also do Instruction-to-Speech, aka **Voice Creation**. You can describe a voice in detail, and the model will generate a voice that matches the description. For more Instruction-to-Speech sample instructions, you can refer to https://voxinstruct.github.io/VoxInstruct/.
|
| 1271 |
+
|
| 1272 |
+
```python
|
| 1273 |
+
instruction = 'Speak like a male charming superstar, radiating confidence and style in every word.'
|
| 1274 |
+
|
| 1275 |
+
msgs = [{'role': 'user', 'content': [instruction]}]
|
| 1276 |
+
|
| 1277 |
+
res = model.chat(
|
| 1278 |
+
msgs=msgs,
|
| 1279 |
+
tokenizer=tokenizer,
|
| 1280 |
+
sampling=True,
|
| 1281 |
+
max_new_tokens=128,
|
| 1282 |
+
use_tts_template=True,
|
| 1283 |
+
generate_audio=True,
|
| 1284 |
+
temperature=0.3,
|
| 1285 |
+
output_audio_path='result_voice_creation.wav',
|
| 1286 |
+
)
|
| 1287 |
+
```
|
| 1288 |
+
|
| 1289 |
+
<hr/>
|
| 1290 |
+
|
| 1291 |
+
#### Voice Cloning
|
| 1292 |
+
|
| 1293 |
+
`MiniCPM-o-2.6` can also do zero-shot text-to-speech, aka **Voice Cloning**. With this mode, model will act like a TTS model.
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
```python
|
| 1297 |
+
ref_audio, _ = librosa.load('./assets/input_examples/icl_20.wav', sr=16000, mono=True) # load the reference audio
|
| 1298 |
+
sys_prompt = model.get_sys_prompt(ref_audio=ref_audio, mode='voice_cloning', language='en')
|
| 1299 |
+
text_prompt = f""Please read the text below.""
|
| 1300 |
+
user_question = {'role': 'user', 'content': [text_prompt, ""content that you want to read""]}
|
| 1301 |
+
|
| 1302 |
+
msgs = [sys_prompt, user_question]
|
| 1303 |
+
res = model.chat(
|
| 1304 |
+
msgs=msgs,
|
| 1305 |
+
tokenizer=tokenizer,
|
| 1306 |
+
sampling=True,
|
| 1307 |
+
max_new_tokens=128,
|
| 1308 |
+
use_tts_template=True,
|
| 1309 |
+
generate_audio=True,
|
| 1310 |
+
temperature=0.3,
|
| 1311 |
+
output_audio_path='result_voice_cloning.wav',
|
| 1312 |
+
)
|
| 1313 |
+
|
| 1314 |
+
```
|
| 1315 |
+
|
| 1316 |
+
<hr/>
|
| 1317 |
+
|
| 1318 |
+
#### Addressing Various Audio Understanding Tasks
|
| 1319 |
+
|
| 1320 |
+
`MiniCPM-o-2.6` can also be used to address various audio understanding tasks, such as ASR, speaker analysis, general audio captioning, and sound scene tagging.
|
| 1321 |
+
|
| 1322 |
+
For audio-to-text tasks, you can use the following prompts:
|
| 1323 |
+
|
| 1324 |
+
- ASR with ZH(same as AST en2zh): `请仔细听这段音频片段,并将其内容逐字记录。`
|
| 1325 |
+
- ASR with EN(same as AST zh2en): `Please listen to the audio snippet carefully and transcribe the content.`
|
| 1326 |
+
- Speaker Analysis: `Based on the speaker's content, speculate on their gender, condition, age range, and health status.`
|
| 1327 |
+
- General Audio Caption: `Summarize the main content of the audio.`
|
| 1328 |
+
- General Sound Scene Tagging: `Utilize one keyword to convey the audio's content or the associated scene.`
|
| 1329 |
+
|
| 1330 |
+
```python
|
| 1331 |
+
task_prompt = ""Please listen to the audio snippet carefully and transcribe the content."" + ""\n"" # can change to other prompts.
|
| 1332 |
+
audio_input, _ = librosa.load('./assets/input_examples/audio_understanding.mp3', sr=16000, mono=True) # load the audio to be captioned
|
| 1333 |
+
|
| 1334 |
+
msgs = [{'role': 'user', 'content': [task_prompt, audio_input]}]
|
| 1335 |
+
|
| 1336 |
+
res = model.chat(
|
| 1337 |
+
msgs=msgs,
|
| 1338 |
+
tokenizer=tokenizer,
|
| 1339 |
+
sampling=True,
|
| 1340 |
+
max_new_tokens=128,
|
| 1341 |
+
use_tts_template=True,
|
| 1342 |
+
generate_audio=True,
|
| 1343 |
+
temperature=0.3,
|
| 1344 |
+
output_audio_path='result_audio_understanding.wav',
|
| 1345 |
+
)
|
| 1346 |
+
print(res)
|
| 1347 |
+
```
|
| 1348 |
+
|
| 1349 |
+
|
| 1350 |
+
### Vision-Only mode
|
| 1351 |
+
|
| 1352 |
+
`MiniCPM-o-2_6` has the same inference methods as `MiniCPM-V-2_6`
|
| 1353 |
+
|
| 1354 |
+
#### Chat with single image
|
| 1355 |
+
```python
|
| 1356 |
+
# test.py
|
| 1357 |
+
image = Image.open('xx.jpg').convert('RGB')
|
| 1358 |
+
question = 'What is in the image?'
|
| 1359 |
+
msgs = [{'role': 'user', 'content': [image, question]}]
|
| 1360 |
+
res = model.chat(
|
| 1361 |
+
image=None,
|
| 1362 |
+
msgs=msgs,
|
| 1363 |
+
tokenizer=tokenizer
|
| 1364 |
+
)
|
| 1365 |
+
print(res)
|
| 1366 |
+
|
| 1367 |
+
## if you want to use streaming, please make sure sampling=True and stream=True
|
| 1368 |
+
## the model.chat will return a generator
|
| 1369 |
+
res = model.chat(
|
| 1370 |
+
msgs=msgs,
|
| 1371 |
+
tokenizer=tokenizer,
|
| 1372 |
+
sampling=True,
|
| 1373 |
+
stream=True
|
| 1374 |
+
)
|
| 1375 |
+
generated_text = """"
|
| 1376 |
+
for new_text in res:
|
| 1377 |
+
generated_text += new_text
|
| 1378 |
+
print(new_text, flush=True, end='')
|
| 1379 |
+
```
|
| 1380 |
+
|
| 1381 |
+
#### Chat with multiple images
|
| 1382 |
+
<details>
|
| 1383 |
+
<summary> Click to show Python code running MiniCPM-o 2.6 with multiple images input. </summary>
|
| 1384 |
+
|
| 1385 |
+
```python
|
| 1386 |
+
image1 = Image.open('image1.jpg').convert('RGB')
|
| 1387 |
+
image2 = Image.open('image2.jpg').convert('RGB')
|
| 1388 |
+
question = 'Compare image 1 and image 2, tell me about the differences between image 1 and image 2.'
|
| 1389 |
+
msgs = [{'role': 'user', 'content': [image1, image2, question]}]
|
| 1390 |
+
answer = model.chat(
|
| 1391 |
+
msgs=msgs,
|
| 1392 |
+
tokenizer=tokenizer
|
| 1393 |
+
)
|
| 1394 |
+
print(answer)
|
| 1395 |
+
```
|
| 1396 |
+
</details>
|
| 1397 |
+
|
| 1398 |
+
#### In-context few-shot learning
|
| 1399 |
+
<details>
|
| 1400 |
+
<summary> Click to view Python code running MiniCPM-o 2.6 with few-shot input. </summary>
|
| 1401 |
+
|
| 1402 |
+
```python
|
| 1403 |
+
question = ""production date""
|
| 1404 |
+
image1 = Image.open('example1.jpg').convert('RGB')
|
| 1405 |
+
answer1 = ""2023.08.04""
|
| 1406 |
+
image2 = Image.open('example2.jpg').convert('RGB')
|
| 1407 |
+
answer2 = ""2007.04.24""
|
| 1408 |
+
image_test = Image.open('test.jpg').convert('RGB')
|
| 1409 |
+
msgs = [
|
| 1410 |
+
{'role': 'user', 'content': [image1, question]}, {'role': 'assistant', 'content': [answer1]},
|
| 1411 |
+
{'role': 'user', 'content': [image2, question]}, {'role': 'assistant', 'content': [answer2]},
|
| 1412 |
+
{'role': 'user', 'content': [image_test, question]}
|
| 1413 |
+
]
|
| 1414 |
+
answer = model.chat(
|
| 1415 |
+
msgs=msgs,
|
| 1416 |
+
tokenizer=tokenizer
|
| 1417 |
+
)
|
| 1418 |
+
print(answer)
|
| 1419 |
+
```
|
| 1420 |
+
</details>
|
| 1421 |
+
|
| 1422 |
+
#### Chat with video
|
| 1423 |
+
<details>
|
| 1424 |
+
<summary> Click to view Python code running MiniCPM-o 2.6 with video input. </summary>
|
| 1425 |
+
|
| 1426 |
+
```python
|
| 1427 |
+
MAX_NUM_FRAMES=64 # if cuda OOM set a smaller number
|
| 1428 |
+
def encode_video(video_path):
|
| 1429 |
+
def uniform_sample(l, n):
|
| 1430 |
+
gap = len(l) / n
|
| 1431 |
+
idxs = [int(i * gap + gap / 2) for i in range(n)]
|
| 1432 |
+
return [l[i] for i in idxs]
|
| 1433 |
+
vr = VideoReader(video_path, ctx=cpu(0))
|
| 1434 |
+
sample_fps = round(vr.get_avg_fps() / 1) # FPS
|
| 1435 |
+
frame_idx = [i for i in range(0, len(vr), sample_fps)]
|
| 1436 |
+
if len(frame_idx) > MAX_NUM_FRAMES:
|
| 1437 |
+
frame_idx = uniform_sample(frame_idx, MAX_NUM_FRAMES)
|
| 1438 |
+
frames = vr.get_batch(frame_idx).asnumpy()
|
| 1439 |
+
frames = [Image.fromarray(v.astype('uint8')) for v in frames]
|
| 1440 |
+
print('num frames:', len(frames))
|
| 1441 |
+
return frames
|
| 1442 |
+
video_path =""video_test.mp4""
|
| 1443 |
+
frames = encode_video(video_path)
|
| 1444 |
+
question = ""Describe the video""
|
| 1445 |
+
msgs = [
|
| 1446 |
+
{'role': 'user', 'content': frames + [question]},
|
| 1447 |
+
]
|
| 1448 |
+
# Set decode params for video
|
| 1449 |
+
params={}
|
| 1450 |
+
params[""use_image_id""] = False
|
| 1451 |
+
params[""max_slice_nums""] = 2 # use 1 if cuda OOM and video resolution > 448*448
|
| 1452 |
+
answer = model.chat(
|
| 1453 |
+
msgs=msgs,
|
| 1454 |
+
tokenizer=tokenizer,
|
| 1455 |
+
**params
|
| 1456 |
+
)
|
| 1457 |
+
print(answer)
|
| 1458 |
+
```
|
| 1459 |
+
</details>
|
| 1460 |
+
|
| 1461 |
+
Please look at [GitHub](https://github.com/OpenBMB/MiniCPM-o) for more detail about usage.
|
| 1462 |
+
|
| 1463 |
+
|
| 1464 |
+
## Inference with llama.cpp<a id=""llamacpp""></a>
|
| 1465 |
+
MiniCPM-o 2.6 (vision-only mode) can run with llama.cpp. See our fork of [llama.cpp](https://github.com/OpenBMB/llama.cpp/tree/minicpm-omni) and [readme](https://github.com/OpenBMB/llama.cpp/blob/minicpm-omni/examples/llava/README-minicpmo2.6.md) for more detail.
|
| 1466 |
+
|
| 1467 |
+
|
| 1468 |
+
## Int4 quantized version
|
| 1469 |
+
Download the int4 quantized version for lower GPU memory (7GB) usage: [MiniCPM-o-2_6-int4](https://huggingface.co/openbmb/MiniCPM-o-2_6-int4).
|
| 1470 |
+
|
| 1471 |
+
|
| 1472 |
+
## License
|
| 1473 |
+
#### Model License
|
| 1474 |
+
* The code in this repo is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
|
| 1475 |
+
* The usage of MiniCPM-o and MiniCPM-V series model weights must strictly follow [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
|
| 1476 |
+
* The models and weights of MiniCPM are completely free for academic research. After filling out a [""questionnaire""](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g) for registration, MiniCPM-o 2.6 weights are also available for free commercial use.
|
| 1477 |
+
|
| 1478 |
+
|
| 1479 |
+
#### Statement
|
| 1480 |
+
* As an LMM, MiniCPM-o 2.6 generates contents by learning a large mount of multimodal corpora, but it cannot comprehend, express personal opinions or make value judgement. Anything generated by MiniCPM-o 2.6 does not represent the views and positions of the model developers
|
| 1481 |
+
* We will not be liable for any problems arising from the use of the MinCPM-V models, including but not limited to data security issues, risk of public opinion, or any risks and problems arising from the misdirection, misuse, dissemination or misuse of the model.
|
| 1482 |
+
|
| 1483 |
+
## Key Techniques and Other Multimodal Projects
|
| 1484 |
+
|
| 1485 |
+
👏 Welcome to explore key techniques of MiniCPM-o 2.6 and other multimodal projects of our team:
|
| 1486 |
+
|
| 1487 |
+
[VisCPM](https://github.com/OpenBMB/VisCPM/tree/main) | [RLHF-V](https://github.com/RLHF-V/RLHF-V) | [LLaVA-UHD](https://github.com/thunlp/LLaVA-UHD) | [RLAIF-V](https://github.com/RLHF-V/RLAIF-V)
|
| 1488 |
+
|
| 1489 |
+
## Citation
|
| 1490 |
+
|
| 1491 |
+
If you find our work helpful, please consider citing our papers 📝 and liking this project ❤️!
|
| 1492 |
+
|
| 1493 |
+
```bib
|
| 1494 |
+
@article{yao2024minicpm,
|
| 1495 |
+
title={MiniCPM-V: A GPT-4V Level MLLM on Your Phone},
|
| 1496 |
+
author={Yao, Yuan and Yu, Tianyu and Zhang, Ao and Wang, Chongyi and Cui, Junbo and Zhu, Hongji and Cai, Tianchi and Li, Haoyu and Zhao, Weilin and He, Zhihui and others},
|
| 1497 |
+
journal={arXiv preprint arXiv:2408.01800},
|
| 1498 |
+
year={2024}
|
| 1499 |
+
}
|
| 1500 |
+
```
|
| 1501 |
+
","{""id"": ""openbmb/MiniCPM-o-2_6"", ""author"": ""openbmb"", ""sha"": ""df3d4c9d637d5cb32c4fcc256336ff6e3e15c71a"", ""last_modified"": ""2025-03-28 08:33:40+00:00"", ""created_at"": ""2025-01-12 07:40:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 283615, ""downloads_all_time"": null, ""likes"": 1112, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""minicpmo"", ""feature-extraction"", ""minicpm-o"", ""omni"", ""vision"", ""ocr"", ""multi-image"", ""video"", ""custom_code"", ""audio"", ""speech"", ""voice cloning"", ""live Streaming"", ""realtime speech conversation"", ""asr"", ""tts"", ""any-to-any"", ""multilingual"", ""dataset:openbmb/RLAIF-V-Dataset"", ""arxiv:2405.17220"", ""arxiv:2408.01800"", ""region:us""], ""pipeline_tag"": ""any-to-any"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- openbmb/RLAIF-V-Dataset\nlanguage:\n- multilingual\nlibrary_name: transformers\npipeline_tag: any-to-any\ntags:\n- minicpm-o\n- omni\n- vision\n- ocr\n- multi-image\n- video\n- custom_code\n- audio\n- speech\n- voice cloning\n- live Streaming\n- realtime speech conversation\n- asr\n- tts"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""MiniCPMO""], ""auto_map"": {""AutoConfig"": ""configuration_minicpm.MiniCPMOConfig"", ""AutoModel"": ""modeling_minicpmo.MiniCPMO"", ""AutoModelForCausalLM"": ""modeling_minicpmo.MiniCPMO""}, ""model_type"": ""minicpmo"", ""tokenizer_config"": {""bos_token"": ""<|im_start|>"", ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' 
}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_minicpmo.MiniCPMO"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/Skiing.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/Vocos.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/chattts_tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/chattts_tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/chattts_tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/demo.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/Trump_WEF_2018_10s.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/assistant_default_female_voice.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/assistant_female_voice.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/assistant_male_voice.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/audio_understanding.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/chi-english-1.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/cxk_original.wav', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/exciting-emotion.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/fast-pace.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/icl_20.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_examples/indian-accent.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_minicpmv.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_navit_siglip.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_minicpmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resampler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_minicpmo_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""sitammeur/PicQ"", ""sitammeur/VidiQA"", ""mohan007/autism_exp_with_minicpm_o_2_6"", ""srinivasmmw2024/minicpm_2"", ""nooneshouldtouch/PicQ""], ""safetensors"": {""parameters"": {""BF16"": 8674997028}, ""total"": 8674997028}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-28 08:33:40+00:00"", ""cardData"": ""datasets:\n- openbmb/RLAIF-V-Dataset\nlanguage:\n- multilingual\nlibrary_name: transformers\npipeline_tag: any-to-any\ntags:\n- minicpm-o\n- omni\n- vision\n- ocr\n- multi-image\n- video\n- custom_code\n- audio\n- speech\n- voice cloning\n- live Streaming\n- realtime speech conversation\n- asr\n- tts"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_minicpmo.MiniCPMO"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""678371daa22796d3c0372a09"", ""modelId"": ""openbmb/MiniCPM-o-2_6"", ""usedStorage"": 17428736163}",0,"https://huggingface.co/kirillrb/45, https://huggingface.co/George1234f/h, https://huggingface.co/brockfellows8/Ember, https://huggingface.co/aydin1876/rma, https://huggingface.co/AIExplorer1234/customer-churn-prediction",5,"https://huggingface.co/Guilherme34/Samantha-omni-lora, https://huggingface.co/Guilherme34/Samantha-omni-humanlike-lora",2,"https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf, https://huggingface.co/openbmb/MiniCPM-o-2_6-int4, https://huggingface.co/bartowski/MiniCPM-o-2_6-GGUF, https://huggingface.co/lmstudio-community/MiniCPM-o-2_6-GGUF, https://huggingface.co/second-state/MiniCPM-o-2_6-GGUF, https://huggingface.co/2dameneko/MiniCPM-o-2_6-nf4, 
https://huggingface.co/gaianet/MiniCPM-o-2_6-GGUF",7,,0,"huggingface/InferenceSupport/discussions/new?title=openbmb/MiniCPM-o-2_6&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bopenbmb%2FMiniCPM-o-2_6%5D(%2Fopenbmb%2FMiniCPM-o-2_6)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mohan007/autism_exp_with_minicpm_o_2_6, nooneshouldtouch/PicQ, sitammeur/PicQ, sitammeur/VidiQA, srinivasmmw2024/minicpm_2",6
|
| 1502 |
+
kirillrb/45,"---
|
| 1503 |
+
license: afl-3.0
|
| 1504 |
+
datasets:
|
| 1505 |
+
- PowerInfer/QWQ-LONGCOT-500K
|
| 1506 |
+
language:
|
| 1507 |
+
- hy
|
| 1508 |
+
- gv
|
| 1509 |
+
- aa
|
| 1510 |
+
metrics:
|
| 1511 |
+
- accuracy
|
| 1512 |
+
base_model:
|
| 1513 |
+
- openbmb/MiniCPM-o-2_6
|
| 1514 |
+
new_version: deepseek-ai/DeepSeek-R1
|
| 1515 |
+
pipeline_tag: question-answering
|
| 1516 |
+
tags:
|
| 1517 |
+
- legal
|
| 1518 |
+
---","{""id"": ""kirillrb/45"", ""author"": ""kirillrb"", ""sha"": ""ed0b5a02ee81b11491d1fbf1d8e0a17235fd8229"", ""last_modified"": ""2025-01-24 12:28:35+00:00"", ""created_at"": ""2025-01-24 12:27:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""legal"", ""question-answering"", ""hy"", ""gv"", ""aa"", ""dataset:PowerInfer/QWQ-LONGCOT-500K"", ""base_model:openbmb/MiniCPM-o-2_6"", ""base_model:finetune:openbmb/MiniCPM-o-2_6"", ""license:afl-3.0"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- PowerInfer/QWQ-LONGCOT-500K\nlanguage:\n- hy\n- gv\n- aa\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: question-answering\ntags:\n- legal\nnew_version: deepseek-ai/DeepSeek-R1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-24 12:28:35+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- PowerInfer/QWQ-LONGCOT-500K\nlanguage:\n- hy\n- gv\n- aa\nlicense: afl-3.0\nmetrics:\n- accuracy\npipeline_tag: question-answering\ntags:\n- legal\nnew_version: deepseek-ai/DeepSeek-R1"", ""transformersInfo"": null, ""_id"": ""679387153b19d991b518cde9"", ""modelId"": ""kirillrb/45"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kirillrb/45&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkirillrb%2F45%5D(%2Fkirillrb%2F45)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 1519 |
+
George1234f/h,"---
|
| 1520 |
+
license: apache-2.0
|
| 1521 |
+
language:
|
| 1522 |
+
- en
|
| 1523 |
+
base_model:
|
| 1524 |
+
- openbmb/MiniCPM-o-2_6
|
| 1525 |
+
pipeline_tag: text-classification
|
| 1526 |
+
---","{""id"": ""George1234f/h"", ""author"": ""George1234f"", ""sha"": ""eaeee29c3a5d62a19976197ca3c75f51aacc90ee"", ""last_modified"": ""2025-01-17 23:35:21+00:00"", ""created_at"": ""2025-01-17 23:32:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-classification"", ""en"", ""base_model:openbmb/MiniCPM-o-2_6"", ""base_model:finetune:openbmb/MiniCPM-o-2_6"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-o-2_6\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-classification"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-17 23:35:21+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-o-2_6\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-classification"", ""transformersInfo"": null, ""_id"": ""678ae8709cf08f247d162c91"", ""modelId"": ""George1234f/h"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=George1234f/h&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGeorge1234f%2Fh%5D(%2FGeorge1234f%2Fh)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 1527 |
+
brockfellows8/Ember,"---
|
| 1528 |
+
license: gpl-3.0
|
| 1529 |
+
datasets:
|
| 1530 |
+
- open-thoughts/OpenThoughts-114k
|
| 1531 |
+
language:
|
| 1532 |
+
- en
|
| 1533 |
+
metrics:
|
| 1534 |
+
- bertscore
|
| 1535 |
+
base_model:
|
| 1536 |
+
- openbmb/MiniCPM-o-2_6
|
| 1537 |
+
pipeline_tag: text-generation
|
| 1538 |
+
---","{""id"": ""brockfellows8/Ember"", ""author"": ""brockfellows8"", ""sha"": ""135649b4d89ba740dfdee1639477025e88c9b81f"", ""last_modified"": ""2025-02-11 12:18:05+00:00"", ""created_at"": ""2025-02-11 12:12:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""text-generation"", ""en"", ""dataset:open-thoughts/OpenThoughts-114k"", ""base_model:openbmb/MiniCPM-o-2_6"", ""base_model:finetune:openbmb/MiniCPM-o-2_6"", ""license:gpl-3.0"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- open-thoughts/OpenThoughts-114k\nlanguage:\n- en\nlicense: gpl-3.0\nmetrics:\n- bertscore\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-11 12:18:05+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- open-thoughts/OpenThoughts-114k\nlanguage:\n- en\nlicense: gpl-3.0\nmetrics:\n- bertscore\npipeline_tag: text-generation"", ""transformersInfo"": null, ""_id"": ""67ab3eaa362216c0858179d1"", ""modelId"": ""brockfellows8/Ember"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=brockfellows8/Ember&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbrockfellows8%2FEmber%5D(%2Fbrockfellows8%2FEmber)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 1539 |
+
aydin1876/rma,"---
|
| 1540 |
+
license: apache-2.0
|
| 1541 |
+
language:
|
| 1542 |
+
- tr
|
| 1543 |
+
base_model:
|
| 1544 |
+
- openbmb/MiniCPM-o-2_6
|
| 1545 |
+
pipeline_tag: any-to-any
|
| 1546 |
+
---","{""id"": ""aydin1876/rma"", ""author"": ""aydin1876"", ""sha"": ""c76eeceac7ae18b403c506213a8033c08760d80d"", ""last_modified"": ""2025-02-12 14:42:51+00:00"", ""created_at"": ""2025-02-12 14:24:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""any-to-any"", ""tr"", ""base_model:openbmb/MiniCPM-o-2_6"", ""base_model:finetune:openbmb/MiniCPM-o-2_6"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""any-to-any"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-o-2_6\nlanguage:\n- tr\nlicense: apache-2.0\npipeline_tag: any-to-any"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-12 14:42:51+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-o-2_6\nlanguage:\n- tr\nlicense: apache-2.0\npipeline_tag: any-to-any"", ""transformersInfo"": null, ""_id"": ""67acaf2e7e3ab1f12a819b96"", ""modelId"": ""aydin1876/rma"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aydin1876/rma&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baydin1876%2Frma%5D(%2Faydin1876%2Frma)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 1547 |
+
AIExplorer1234/customer-churn-prediction,"---
|
| 1548 |
+
license: apache-2.0
|
| 1549 |
+
datasets:
|
| 1550 |
+
- aai510-group1/telco-customer-churn
|
| 1551 |
+
language:
|
| 1552 |
+
- en
|
| 1553 |
+
metrics:
|
| 1554 |
+
- accuracy
|
| 1555 |
+
- roc_auc
|
| 1556 |
+
- mse
|
| 1557 |
+
base_model:
|
| 1558 |
+
- openbmb/MiniCPM-o-2_6
|
| 1559 |
+
library_name: sklearn
|
| 1560 |
+
---
|
| 1561 |
+
This model predicts telecom customer churn using machine learning. It analyzes tenure, monthly charges,
|
| 1562 |
+
and contract types. Trained with logistic regression and decision trees, it helps businesses reduce churn through
|
| 1563 |
+
targeted retention strategies. Evaluation includes accuracy, precision, and AUC scores.
|
| 1564 |
+
|
| 1565 |
+
","{""id"": ""AIExplorer1234/customer-churn-prediction"", ""author"": ""AIExplorer1234"", ""sha"": ""ccdcd6687b15f75d54e608c6ef9531f0134ca6d5"", ""last_modified"": ""2025-02-18 18:08:05+00:00"", ""created_at"": ""2025-02-18 17:46:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""sklearn"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sklearn"", ""en"", ""dataset:aai510-group1/telco-customer-churn"", ""base_model:openbmb/MiniCPM-o-2_6"", ""base_model:finetune:openbmb/MiniCPM-o-2_6"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- aai510-group1/telco-customer-churn\nlanguage:\n- en\nlibrary_name: sklearn\nlicense: apache-2.0\nmetrics:\n- accuracy\n- roc_auc\n- mse"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='customer-churn-prediction.pkl', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-18 18:08:05+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-o-2_6\ndatasets:\n- aai510-group1/telco-customer-churn\nlanguage:\n- en\nlibrary_name: sklearn\nlicense: apache-2.0\nmetrics:\n- accuracy\n- roc_auc\n- mse"", ""transformersInfo"": null, ""_id"": ""67b4c75b2986a9136672990f"", ""modelId"": ""AIExplorer1234/customer-churn-prediction"", ""usedStorage"": 
16416025}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=AIExplorer1234/customer-churn-prediction&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAIExplorer1234%2Fcustomer-churn-prediction%5D(%2FAIExplorer1234%2Fcustomer-churn-prediction)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Nemotron-Mini-4B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nvidia/Nemotron-Mini-4B-Instruct,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: nvidia-open-model-license
|
| 5 |
+
license_link: >-
|
| 6 |
+
https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf
|
| 7 |
+
library_name: transformers
|
| 8 |
+
pipeline_tag: text-generation
|
| 9 |
+
language:
|
| 10 |
+
- en
|
| 11 |
+
tags:
|
| 12 |
+
- nvidia
|
| 13 |
+
- llama-3
|
| 14 |
+
- pytorch
|
| 15 |
+
---
|
| 16 |
+
# Nemotron-Mini-4B-Instruct
|
| 17 |
+
|
| 18 |
+
## Model Overview
|
| 19 |
+
|
| 20 |
+
Nemotron-Mini-4B-Instruct is a model for generating responses for roleplaying, retrieval augmented generation, and function calling. It is a small language model (SLM) optimized through distillation, pruning and quantization for speed and on-device deployment. It is a fine-tuned version of [nvidia/Minitron-4B-Base](https://huggingface.co/nvidia/Minitron-4B-Base), which was pruned and distilled from [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) using [our LLM compression technique](https://arxiv.org/abs/2407.14679). This instruct model is optimized for roleplay, RAG QA, and function calling in English. It supports a context length of 4,096 tokens. This model is ready for commercial use.
|
| 21 |
+
|
| 22 |
+
Try this model on [build.nvidia.com](https://build.nvidia.com/nvidia/nemotron-mini-4b-instruct).
|
| 23 |
+
|
| 24 |
+
For more details about how this model is used for [NVIDIA ACE](https://developer.nvidia.com/ace), please refer to [this blog post](https://developer.nvidia.com/blog/deploy-the-first-on-device-small-language-model-for-improved-game-character-roleplay/) and [this demo video](https://www.youtube.com/watch?v=d5z7oIXhVqg), which showcases how the model can be integrated into a video game. You can download the model checkpoint for NVIDIA AI Inference Manager (AIM) SDK from [here](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/ucs-ms/resources/nemotron-mini-4b-instruct).
|
| 25 |
+
|
| 26 |
+
**Model Developer:** NVIDIA
|
| 27 |
+
|
| 28 |
+
**Model Dates:** Nemotron-Mini-4B-Instruct was trained between February 2024 and Aug 2024.
|
| 29 |
+
|
| 30 |
+
## License
|
| 31 |
+
|
| 32 |
+
[NVIDIA Community Model License](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct/blob/main/nvidia-community-model-license-aug2024.pdf)
|
| 33 |
+
|
| 34 |
+
## Model Architecture
|
| 35 |
+
|
| 36 |
+
Nemotron-Mini-4B-Instruct uses a model embedding size of 3072, 32 attention heads, and an MLP intermediate dimension of 9216. It also uses Grouped-Query Attention (GQA) and Rotary Position Embeddings (RoPE).
|
| 37 |
+
|
| 38 |
+
**Architecture Type:** Transformer Decoder (auto-regressive language model)
|
| 39 |
+
|
| 40 |
+
**Network Architecture:** Nemotron-4
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
## Prompt Format:
|
| 44 |
+
|
| 45 |
+
We recommend using the following prompt template, which was used to fine-tune the model. The model may not perform optimally without it.
|
| 46 |
+
|
| 47 |
+
**Single Turn**
|
| 48 |
+
|
| 49 |
+
```
|
| 50 |
+
<extra_id_0>System
|
| 51 |
+
{system prompt}
|
| 52 |
+
|
| 53 |
+
<extra_id_1>User
|
| 54 |
+
{prompt}
|
| 55 |
+
<extra_id_1>Assistant\n
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
**Tool use**
|
| 59 |
+
|
| 60 |
+
```
|
| 61 |
+
<extra_id_0>System
|
| 62 |
+
{system prompt}
|
| 63 |
+
|
| 64 |
+
<tool> ... </tool>
|
| 65 |
+
<context> ... </context>
|
| 66 |
+
|
| 67 |
+
<extra_id_1>User
|
| 68 |
+
{prompt}
|
| 69 |
+
<extra_id_1>Assistant
|
| 70 |
+
<toolcall> ... </toolcall>
|
| 71 |
+
<extra_id_1>Tool
|
| 72 |
+
{tool response}
|
| 73 |
+
<extra_id_1>Assistant\n
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
## Usage
|
| 78 |
+
|
| 79 |
+
```
|
| 80 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 81 |
+
|
| 82 |
+
# Load the tokenizer and model
|
| 83 |
+
tokenizer = AutoTokenizer.from_pretrained(""nvidia/Nemotron-Mini-4B-Instruct"")
|
| 84 |
+
model = AutoModelForCausalLM.from_pretrained(""nvidia/Nemotron-Mini-4B-Instruct"")
|
| 85 |
+
|
| 86 |
+
# Use the prompt template
|
| 87 |
+
messages = [
|
| 88 |
+
{
|
| 89 |
+
""role"": ""system"",
|
| 90 |
+
""content"": ""You are a friendly chatbot who always responds in the style of a pirate"",
|
| 91 |
+
},
|
| 92 |
+
{""role"": ""user"", ""content"": ""How many helicopters can a human eat in one sitting?""},
|
| 93 |
+
]
|
| 94 |
+
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors=""pt"")
|
| 95 |
+
|
| 96 |
+
outputs = model.generate(tokenized_chat, max_new_tokens=128)
|
| 97 |
+
print(tokenizer.decode(outputs[0]))
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
You can also use `pipeline` but you need to create a tokenizer object and assign it to the pipeline manually.
|
| 101 |
+
|
| 102 |
+
```
|
| 103 |
+
from transformers import AutoTokenizer
|
| 104 |
+
from transformers import pipeline
|
| 105 |
+
|
| 106 |
+
tokenizer = AutoTokenizer.from_pretrained(""nvidia/Nemotron-Mini-4B-Instruct"")
|
| 107 |
+
|
| 108 |
+
messages = [
|
| 109 |
+
{""role"": ""user"", ""content"": ""Who are you?""},
|
| 110 |
+
]
|
| 111 |
+
pipe = pipeline(""text-generation"", model=""nvidia/Nemotron-Mini-4B-Instruct"")
|
| 112 |
+
pipe.tokenizer = tokenizer # You need to assign tokenizer manually
|
| 113 |
+
pipe(messages)
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
## AI Safety Efforts
|
| 117 |
+
|
| 118 |
+
The Nemotron-Mini-4B-Instruct model underwent AI safety evaluation including adversarial testing via three distinct methods:
|
| 119 |
+
- [Garak](https://github.com/leondz/garak), is an automated LLM vulnerability scanner that probes for common weaknesses, including prompt injection and data leakage.
|
| 120 |
+
- [AEGIS](https://huggingface.co/datasets/nvidia/Aegis-AI-Content-Safety-Dataset-1.0), is a content safety evaluation dataset and LLM based content safety classifier model, that adheres to a broad taxonomy of 13 categories of critical risks in human-LLM interactions.
|
| 121 |
+
- Human Content Red Teaming leveraging human interaction and evaluation of the models' responses.
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
## Limitations
|
| 125 |
+
|
| 126 |
+
The model was trained on data that contains toxic language and societal biases originally crawled from the internet. Therefore, the model may amplify those biases and return toxic responses especially when prompted with toxic prompts. The model may generate answers that may be inaccurate, omit key information, or include irrelevant or redundant text producing socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive. This issue could be exacerbated without the use of the recommended prompt template. This issue could be exacerbated without the use of the recommended prompt template.
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
## Ethical Considerations
|
| 130 |
+
|
| 131 |
+
NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the [Model Card++](https://build.nvidia.com/nvidia/nemotron-mini-4b-instruct/modelcard). Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).","{""id"": ""nvidia/Nemotron-Mini-4B-Instruct"", ""author"": ""nvidia"", ""sha"": ""791833e92ebddb0bc2c1007f6d2b6764f886a2ae"", ""last_modified"": ""2025-02-14 19:03:33+00:00"", ""created_at"": ""2024-09-10 16:09:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 844, ""downloads_all_time"": null, ""likes"": 160, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""nemo"", ""nemotron"", ""text-generation"", ""nvidia"", ""llama-3"", ""conversational"", ""en"", ""arxiv:2402.16819"", ""arxiv:2407.14679"", ""license:other"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the 
universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""NemotronForCausalLM""], ""model_type"": ""nemotron"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{'<extra_id_0>System'}}{% for message in messages %}{% if message['role'] == 'system' %}{{'\n' + message['content'].strip()}}{% if tools or contexts %}{{'\n'}}{% endif %}{% endif %}{% endfor %}{% if tools %}{% for tool in tools %}{{ '\n<tool> ' + tool|tojson + ' </tool>' }}{% endfor %}{% endif %}{% if contexts %}{% if tools %}{{'\n'}}{% endif %}{% for context in contexts %}{{ '\n<context> ' + context.strip() + ' </context>' }}{% endfor %}{% endif %}{{'\n\n'}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '<extra_id_1>User\n' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{ '<extra_id_1>Assistant\n' + message['content'].strip() + '\n' }}{% elif message['role'] == 'tool' %}{{ '<extra_id_1>Tool\n' + message['content'].strip() + '\n' }}{% endif %}{% endfor %}{%- if add_generation_prompt %}{{'<extra_id_1>Assistant\n'}}{%- endif %}"", ""eos_token"": ""</s>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nemo/nemotron-mini-4b-instruct.nemo', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nvidia-community-model-license-aug2024.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Tonic/Nemo-Mistral-Minitron""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-14 19:03:33+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66e06f40ec26df895d3b4953"", ""modelId"": ""nvidia/Nemotron-Mini-4B-Instruct"", ""usedStorage"": 25171362021}",0,,0,,0,"https://huggingface.co/DevQuasar/nvidia.Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/NikolayKozloff/Nemotron-Mini-4B-Instruct-Q8_0-GGUF, https://huggingface.co/bartowski/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/abiks/Nemotron-Mini-4B-Instruct-GGUF-Q8, https://huggingface.co/second-state/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/gaianet/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/Felladrin/gguf-Q5_K_L-Nemotron-Mini-4B-Instruct, https://huggingface.co/Felladrin/gguf-Q5_K_M-Nemotron-Mini-4B-Instruct, https://huggingface.co/Solshine/Nemotron-Mini-4B-Instruct-Q4_K_M-GGUF, https://huggingface.co/DevQuasar/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/Triangle104/Nemotron-Mini-4B-Instruct-Q4_0-GGUF, https://huggingface.co/WSDW/Nemotron-Mini-4B-Instruct-Q4_K_M-GGUF, 
https://huggingface.co/jairo/Nemotron-Mini-4B-Instruct-Q4_K_M-GGUF, https://huggingface.co/MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct-ONNX-INT4, https://huggingface.co/mradermacher/Nemotron-Mini-4B-Instruct-GGUF, https://huggingface.co/mradermacher/Nemotron-Mini-4B-Instruct-i1-GGUF, https://huggingface.co/performanceoptician/Nemotron-Mini-4B-Instruct-IQ4_NL-GGUF, https://huggingface.co/performanceoptician/Nemotron-Mini-4B-Instruct-IQ4_XS-GGUF, https://huggingface.co/mitulagr2/gguf-Q5_K_M-Nemotron-Mini-4B-Instruct, https://huggingface.co/BernTheCreator/Nemotron-Mini-4B-Instruct-Q4_0-GGUF, https://huggingface.co/itlwas/Nemotron-Mini-4B-Instruct-Q4_K_M-GGUF, https://huggingface.co/tensorblock/nvidia_Nemotron-Mini-4B-Instruct-GGUF",23,,0,"Tonic/Nemo-Mistral-Minitron, huggingface/InferenceSupport/discussions/new?title=nvidia/Nemotron-Mini-4B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnvidia%2FNemotron-Mini-4B-Instruct%5D(%2Fnvidia%2FNemotron-Mini-4B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
NeverEnding-Dream_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Lykon/NeverEnding-Dream,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: other
|
| 6 |
+
tags:
|
| 7 |
+
- stable-diffusion
|
| 8 |
+
- text-to-image
|
| 9 |
+
- art
|
| 10 |
+
- artistic
|
| 11 |
+
- diffusers
|
| 12 |
+
inference: false
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
# NeverEnding Dream (NED)
|
| 16 |
+
## Official Repository
|
| 17 |
+
|
| 18 |
+
Read more about this model here: https://civitai.com/models/10028/neverending-dream-ned
|
| 19 |
+
Also please support by giving 5 stars and a heart, which will notify new updates.
|
| 20 |
+
|
| 21 |
+
Also consider supporting me on Patreon or ByuMeACoffee
|
| 22 |
+
- https://www.patreon.com/Lykon275
|
| 23 |
+
|
| 24 |
+
You can run this model on:
|
| 25 |
+
- https://sinkin.ai/m/qGdxrYG
|
| 26 |
+
|
| 27 |
+
Some sample output:
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+

|
| 31 |
+

|
| 32 |
+

|
| 33 |
+

|
| 34 |
+

|
| 35 |
+
|
| 36 |
+
","{""id"": ""Lykon/NeverEnding-Dream"", ""author"": ""Lykon"", ""sha"": ""239d0482dc703082d1b2b1a7b6051790ecd6d28c"", ""last_modified"": ""2023-05-11 23:43:42+00:00"", ""created_at"": ""2023-02-19 17:54:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 749, ""downloads_all_time"": null, ""likes"": 162, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""art"", ""artistic"", ""en"", ""license:other"", ""autotrain_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: other\ntags:\n- stable-diffusion\n- text-to-image\n- art\n- artistic\n- diffusers\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='5.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='6.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverEndingDream_1.22_BakedVae_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverEndingDream_1.22_NoVae_fp16_pruned.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverEndingDream_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverEndingDream_ft_mse-inpainting.inpainting.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='NeverEndingDream_ft_mse.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverendingDream_ft_mse.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverendingDream_noVae.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NeverendingDream_noVae_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""ennov8ion/comicbook-models"", 
""luongphamit/DreamShaper-webui"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""ennov8ion/stablediffusion-models"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""AnimeStudio/anime-models"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""ennov8ion/anime-models"", ""cloudwp/DreamShaper-webui"", ""ennov8ion/Landscapes-models"", ""sohoso/anime348756"", ""kbora/minerva-generate-docker"", ""johann22/chat-diffusion"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""animeartstudio/AnimeArtModels1"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX"", ""flatindo/all-models"", ""flatindo/all-models-v1"", ""flatindo/img_All_models"", ""johann22/chat-diffusion-describe"", ""GAIneZis/FantasyArt-Models"", ""vkatis/models_x6"", ""ennov8ion/picasso-diffusion"", ""vih-v/models_x"", ""ennov8ion/anime-new-models"", ""ennov8ion/anime-multi-new-models"", ""ennov8ion/photo-multi"", ""ennov8ion/anime-multi"", ""StanislavMichalov/Magic_Levitan_v1_4_Stanislav"", ""vih-v/models_d2"", ""StiveDudov/Magic_Levitan_v1_4_Stanislav"", ""ElenaVasileva/Magic_Levitan_v1_4_Stanislav"", ""Ashrafb/comicbook-models"", ""sohoso/architecture"", ""K00B404/image_gen_supaqueue_game_assets"", ""Cmescobar27/minerva-generate-docker""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-11 23:43:42+00:00"", ""cardData"": ""language:\n- en\nlicense: other\ntags:\n- stable-diffusion\n- text-to-image\n- art\n- artistic\n- diffusers\ninference: false"", 
""transformersInfo"": null, ""_id"": ""63f2626be965fdebaa63f724"", ""modelId"": ""Lykon/NeverEnding-Dream"", ""usedStorage"": 37147743427}",0,,0,,0,https://huggingface.co/RanaLLC/NeverEnding-Dream-fp16,1,,0,"Alfasign/dIFFU, Ashrafb/comicbook-models, Nymbo/image_gen_supaqueue, TheKitten/Fast-Images-Creature, ennov8ion/3dart-Models, ennov8ion/Scifi-Models, ennov8ion/anime-multi-new-models, ennov8ion/comicbook-models, ennov8ion/stablediffusion-models, grzegorz2047/fast_diffusion, huggingface/InferenceSupport/discussions/new?title=Lykon/NeverEnding-Dream&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BLykon%2FNeverEnding-Dream%5D(%2FLykon%2FNeverEnding-Dream)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, luongphamit/DreamShaper-webui, mindtube/Diffusion50XX",13
|
Nitro-Diffusion_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nitrosocke/Nitro-Diffusion,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: creativeml-openrail-m
|
| 6 |
+
thumbnail: ""https://huggingface.co/nitrosocke/Nitro-Diffusion/resolve/main/nitro-diff-samples-02.jpg""
|
| 7 |
+
tags:
|
| 8 |
+
- stable-diffusion
|
| 9 |
+
- text-to-image
|
| 10 |
+
- image-to-image
|
| 11 |
+
- diffusers
|
| 12 |
+
|
| 13 |
+
---
|
| 14 |
+
### Nitro Diffusion
|
| 15 |
+
|
| 16 |
+
Welcome to Nitro Diffusion - the first Multi-Style Model trained from scratch! This is a fine-tuned Stable Diffusion model trained on three artstyles simultaniously while keeping each style separate from the others. This allows for high control of mixing, weighting and single style use.
|
| 17 |
+
Use the tokens **_archer style, arcane style or modern disney style_** in your prompts for the effect. You can also use more than one for a mixed style like in the examples down below:
|
| 18 |
+
|
| 19 |
+
**If you enjoy my work and want to test new models before release, please consider supporting me**
|
| 20 |
+
[](https://patreon.com/user?u=79196446)
|
| 21 |
+
|
| 22 |
+
**Multi Style Characters from the model:**
|
| 23 |
+

|
| 24 |
+
**Single Style Characters from the model:**
|
| 25 |
+

|
| 26 |
+
**Multi Style Scenes from the model:**
|
| 27 |
+

|
| 28 |
+
|
| 29 |
+
**You can find animated GIFs of Batman and Lara Croft showing the weighting and prompt influence on the bottom of the page.**
|
| 30 |
+
|
| 31 |
+
#### Prompt and settings for Gal Gadot:
|
| 32 |
+
**arcane archer modern disney gal gadot**
|
| 33 |
+
_Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 598801516, Size: 512x768_
|
| 34 |
+
|
| 35 |
+
#### Prompt and settings for the Audi TT:
|
| 36 |
+
**(audi TT car) arcane modern disney style archer**
|
| 37 |
+
_Steps: 30, Sampler: Euler a, CFG scale: 7, Seed: 713702776, Size: 768x512_
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
## Gradio
|
| 41 |
+
|
| 42 |
+
We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Nitro-Diffusion:
|
| 43 |
+
[](https://huggingface.co/spaces/nitrosocke/Nitro-Diffusion-Demo)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
### 🧨 Diffusers
|
| 47 |
+
|
| 48 |
+
This model can be used just like any other Stable Diffusion model. For more information,
|
| 49 |
+
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
|
| 50 |
+
|
| 51 |
+
You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
|
| 52 |
+
|
| 53 |
+
```python
|
| 54 |
+
from diffusers import StableDiffusionPipeline
|
| 55 |
+
import torch
|
| 56 |
+
|
| 57 |
+
model_id = ""nitrosocke/nitro-diffusion""
|
| 58 |
+
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 59 |
+
pipe = pipe.to(""cuda"")
|
| 60 |
+
|
| 61 |
+
prompt = ""archer arcane style magical princess with golden hair""
|
| 62 |
+
image = pipe(prompt).images[0]
|
| 63 |
+
|
| 64 |
+
image.save(""./magical_princess.png"")
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
## License
|
| 69 |
+
|
| 70 |
+
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
|
| 71 |
+
The CreativeML OpenRAIL License specifies:
|
| 72 |
+
|
| 73 |
+
1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
|
| 74 |
+
2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
|
| 75 |
+
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
|
| 76 |
+
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
## Video Demos
|
| 80 |
+
# Batman
|
| 81 |
+

|
| 82 |
+
# Lara Croft
|
| 83 |
+
","{""id"": ""nitrosocke/Nitro-Diffusion"", ""author"": ""nitrosocke"", ""sha"": ""a1989789e6ce4b4a7dc5c10e026581e8cf1d27d9"", ""last_modified"": ""2023-05-16 09:28:36+00:00"", ""created_at"": ""2022-11-16 15:01:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 600, ""downloads_all_time"": null, ""likes"": 383, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""text-to-image"", ""image-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Nitro-Diffusion/resolve/main/nitro-diff-samples-02.jpg"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='batman-demo-01.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='laracroft-demo-01.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitro-diff-samples-01.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitro-diff-samples-02.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitro-diff-samples-03.jpg', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='nitroDiffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", 
""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""yuan2023/Stable-Diffusion-ControlNet-WebUI"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""duchaba/sd_prompt_helper"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""akhaliq/Nitro-Diffusion"", ""IgorSense/Diffusion_Space2"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""Nymbo/Game-Creator"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""Dao3/Top-20-Models"", ""RhythmRemix14/PrintingPressDx"", ""Omnibus/game-test"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/ArtModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""Omnibus/Social-Card-Maker-DL"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""Nymbo/Social-Card-Maker-DL"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""akhaliq/Nitro-Diffusion2"", ""bumsika/ai-bros-diffusion"", ""bradarrML/Diffusion_Space"", ""cloudwp/Top-20-Diffusion"", ""eeyorestoned/Nitro-Diffusion"", ""bruvvy/nitrosocke-Nitro-Diffusion"", ""Karumoon/test007"", ""Mileena/Diffusion_Space2-Styles"", ""ennov8ion/Landscapes-models"", ""Shad0ws/ImageModelTestEnvironment"", ""ucmisanddisinfo/thisApp"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", 
""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""Omnibus/top-20-diffusion"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""richds/Diffusion_Space"", ""IgorSense/Diffusion_Space"", ""Hisjhsshh/NitroDiffusionTesting"", ""c1a1s1/nitrosocke-Nitro-Diffusion"", ""Duskfallcrew/nitrosocke-Nitro-Diffusion"", ""Karumoon/test004"", ""Karumoon/test002"", ""Karumoon/test006"", ""sidd293/nitrosocke-Nitro-Diffusion"", ""xp3857/bin"", ""Mileena/Nitro-Diffusion"", ""tarjomeh/Nitro-Diffusion"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-16 09:28:36+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Nitro-Diffusion/resolve/main/nitro-diff-samples-02.jpg"", ""transformersInfo"": null, ""_id"": ""6374fb58ce2759e3756ef1b7"", ""modelId"": ""nitrosocke/Nitro-Diffusion"", ""usedStorage"": 20383148800}",0,,0,,0,,0,,0,"CompVis/stable-diffusion-license, Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, duchaba/sd_prompt_helper, huggingface/InferenceSupport/discussions/new?title=nitrosocke/Nitro-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnitrosocke%2FNitro-Diffusion%5D(%2Fnitrosocke%2FNitro-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, 
kaleidoskop-hug/PrintingPress, nitrosocke/Nitro-Diffusion-Demo, phenixrhyder/NSFW-ToyWorld",15
|
OpenVoiceV2_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
myshell-ai/OpenVoiceV2,"---
|
| 3 |
+
license: mit
|
| 4 |
+
tags:
|
| 5 |
+
- audio
|
| 6 |
+
- text-to-speech
|
| 7 |
+
- instant-voice-cloning
|
| 8 |
+
language:
|
| 9 |
+
- en
|
| 10 |
+
- zh
|
| 11 |
+
inference: false
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
# OpenVoice V2
|
| 15 |
+
|
| 16 |
+
<a href=""https://trendshift.io/repositories/6161"" target=""_blank""><img src=""https://trendshift.io/api/badge/repositories/6161"" alt=""myshell-ai%2FOpenVoice | Trendshift"" style=""width: 250px; height: 55px;"" width=""250"" height=""55""/></a>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
In April 2024, we release OpenVoice V2, which includes all features in V1 and has:
|
| 20 |
+
|
| 21 |
+
1. Better Audio Quality. OpenVoice V2 adopts a different training strategy that delivers better audio quality.
|
| 22 |
+
|
| 23 |
+
2. Native Multi-lingual Support. English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2.
|
| 24 |
+
|
| 25 |
+
3. Free Commercial Use. Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
<video controls autoplay src=""https://cdn-uploads.huggingface.co/production/uploads/641de0213239b631552713e4/uCHTHD9OUotgOflqDu3QK.mp4""></video>
|
| 29 |
+
|
| 30 |
+
### Features
|
| 31 |
+
- **Accurate Tone Color Cloning.** OpenVoice can accurately clone the reference tone color and generate speech in multiple languages and accents.
|
| 32 |
+
- **Flexible Voice Style Control.** OpenVoice enables granular control over voice styles, such as emotion and accent, as well as other style parameters including rhythm, pauses, and intonation.
|
| 33 |
+
- **Zero-shot Cross-lingual Voice Cloning.** Neither of the language of the generated speech nor the language of the reference speech needs to be presented in the massive-speaker multi-lingual training dataset.
|
| 34 |
+
|
| 35 |
+
### How to Use
|
| 36 |
+
Please see [usage](https://github.com/myshell-ai/OpenVoice/blob/main/docs/USAGE.md) for detailed instructions.
|
| 37 |
+
|
| 38 |
+
# Usage
|
| 39 |
+
|
| 40 |
+
## Table of Content
|
| 41 |
+
|
| 42 |
+
- [Quick Use](#quick-use): directly use OpenVoice without installation.
|
| 43 |
+
- [Linux Install](#linux-install): for researchers and developers only.
|
| 44 |
+
- [V1](#openvoice-v1)
|
| 45 |
+
- [V2](#openvoice-v2)
|
| 46 |
+
- [Install on Other Platforms](#install-on-other-platforms): unofficial installation guide contributed by the community
|
| 47 |
+
|
| 48 |
+
## Quick Use
|
| 49 |
+
|
| 50 |
+
The input speech audio of OpenVoice can be in **Any Language**. OpenVoice can clone the voice in that speech audio, and use the voice to speak in multiple languages. For quick use, we recommend you to try the already deployed services:
|
| 51 |
+
|
| 52 |
+
- [British English](https://app.myshell.ai/widget/vYjqae)
|
| 53 |
+
- [American English](https://app.myshell.ai/widget/nEFFJf)
|
| 54 |
+
- [Indian English](https://app.myshell.ai/widget/V3iYze)
|
| 55 |
+
- [Australian English](https://app.myshell.ai/widget/fM7JVf)
|
| 56 |
+
- [Spanish](https://app.myshell.ai/widget/NNFFVz)
|
| 57 |
+
- [French](https://app.myshell.ai/widget/z2uyUz)
|
| 58 |
+
- [Chinese](https://app.myshell.ai/widget/fU7nUz)
|
| 59 |
+
- [Japanese](https://app.myshell.ai/widget/IfIB3u)
|
| 60 |
+
- [Korean](https://app.myshell.ai/widget/q6ZjIn)
|
| 61 |
+
|
| 62 |
+
## Linux Install
|
| 63 |
+
|
| 64 |
+
This section is only for developers and researchers who are familiar with Linux, Python and PyTorch. Clone this repo, and run
|
| 65 |
+
|
| 66 |
+
```
|
| 67 |
+
conda create -n openvoice python=3.9
|
| 68 |
+
conda activate openvoice
|
| 69 |
+
git clone git@github.com:myshell-ai/OpenVoice.git
|
| 70 |
+
cd OpenVoice
|
| 71 |
+
pip install -e .
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
No matter if you are using V1 or V2, the above installation is the same.
|
| 75 |
+
|
| 76 |
+
### OpenVoice V1
|
| 77 |
+
|
| 78 |
+
Download the checkpoint from [here](https://myshell-public-repo-host.s3.amazonaws.com/openvoice/checkpoints_1226.zip) and extract it to the `checkpoints` folder.
|
| 79 |
+
|
| 80 |
+
**1. Flexible Voice Style Control.**
|
| 81 |
+
Please see [`demo_part1.ipynb`](https://github.com/myshell-ai/OpenVoice/blob/main/demo_part1.ipynb) for an example usage of how OpenVoice enables flexible style control over the cloned voice.
|
| 82 |
+
|
| 83 |
+
**2. Cross-Lingual Voice Cloning.**
|
| 84 |
+
Please see [`demo_part2.ipynb`](https://github.com/myshell-ai/OpenVoice/blob/main/demo_part2.ipynb) for an example for languages seen or unseen in the MSML training set.
|
| 85 |
+
|
| 86 |
+
**3. Gradio Demo.**. We provide a minimalist local gradio demo here. We strongly suggest the users to look into `demo_part1.ipynb`, `demo_part2.ipynb` and the [QnA](QA.md) if they run into issues with the gradio demo. Launch a local gradio demo with `python -m openvoice_app --share`.
|
| 87 |
+
|
| 88 |
+
### OpenVoice V2
|
| 89 |
+
|
| 90 |
+
Download the checkpoint from [here](https://myshell-public-repo-hosting.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip) and extract it to the `checkpoints_v2` folder.
|
| 91 |
+
|
| 92 |
+
Install [MeloTTS](https://github.com/myshell-ai/MeloTTS):
|
| 93 |
+
```
|
| 94 |
+
pip install git+https://github.com/myshell-ai/MeloTTS.git
|
| 95 |
+
python -m unidic download
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
**Demo Usage.** Please see [`demo_part3.ipynb`](https://github.com/myshell-ai/OpenVoice/blob/main/demo_part3.ipynb) for example usage of OpenVoice V2. Now it natively supports English, Spanish, French, Chinese, Japanese and Korean.
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
## Install on Other Platforms
|
| 102 |
+
|
| 103 |
+
This section provides the unofficial installation guides by open-source contributors in the community:
|
| 104 |
+
|
| 105 |
+
- Windows
|
| 106 |
+
- [Guide](https://github.com/Alienpups/OpenVoice/blob/main/docs/USAGE_WINDOWS.md) by [@Alienpups](https://github.com/Alienpups)
|
| 107 |
+
- You are welcome to contribute if you have a better installation guide. We will list you here.
|
| 108 |
+
- Docker
|
| 109 |
+
- [Guide](https://github.com/StevenJSCF/OpenVoice/blob/update-docs/docs/DF_USAGE.md) by [@StevenJSCF](https://github.com/StevenJSCF)
|
| 110 |
+
- You are welcome to contribute if you have a better installation guide. We will list you here.
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
### Links
|
| 114 |
+
- [Github](https://github.com/myshell-ai/OpenVoice)
|
| 115 |
+
- [HFDemo](https://huggingface.co/spaces/myshell-ai/OpenVoiceV2)
|
| 116 |
+
- [Discord](https://discord.gg/myshell)
|
| 117 |
+
|
| 118 |
+
","{""id"": ""myshell-ai/OpenVoiceV2"", ""author"": ""myshell-ai"", ""sha"": ""f36e7edfe1684461a8343844af60babc2efbb727"", ""last_modified"": ""2024-12-24 19:19:30+00:00"", ""created_at"": ""2024-04-23 18:09:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 390, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""audio"", ""text-to-speech"", ""instant-voice-cloning"", ""en"", ""zh"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense: mit\ntags:\n- audio\n- text-to-speech\n- instant-voice-cloning\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.DS_Store', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/.DS_Store', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-au.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-br.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-default.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-india.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-newest.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/en-us.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/es.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/fr.pth', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='base_speakers/ses/jp.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/kr.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='base_speakers/ses/zh.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='converter/checkpoint.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='converter/config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Pendrokar/TTS-Spaces-Arena"", ""myshell-ai/OpenVoiceV2"", ""soiz1/seed-vc3"", ""likesimo75/OpenVoiceV2"", ""ake178178/OpenVoiceV2-dedaodemo02"", ""Mattysaur/OpenVoiceV2"", ""phenixrhyder/OpenVoice-freeAiVoice"", ""fattigerisgood/OpenVoiceV2"", ""CaptainM/OpenVoiceV23"", ""saneowl/OpenVoiceV2"", ""vuxuanhoan/OpenVoiceV2"", ""AaronLikesModels/OpenVoiceV2"", ""Sergionexx/OpenVoiceV2EsLat"", ""JoroGorata/OpenVoiceV2""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-24 19:19:30+00:00"", ""cardData"": ""language:\n- en\n- zh\nlicense: mit\ntags:\n- audio\n- text-to-speech\n- instant-voice-cloning\ninference: false"", ""transformersInfo"": null, ""_id"": ""6627f975ddb4d091c698e4ac"", ""modelId"": ""myshell-ai/OpenVoiceV2"", ""usedStorage"": 131339229}",0,https://huggingface.co/rsxdalv/OpenVoiceV2,1,,0,,0,,0,"CaptainM/OpenVoiceV23, JoroGorata/OpenVoiceV2, Mattysaur/OpenVoiceV2, Pendrokar/TTS-Spaces-Arena, ake178178/OpenVoiceV2-dedaodemo02, fattigerisgood/OpenVoiceV2, huggingface/InferenceSupport/discussions/new?title=myshell-ai/OpenVoiceV2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmyshell-ai%2FOpenVoiceV2%5D(%2Fmyshell-ai%2FOpenVoiceV2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, likesimo75/OpenVoiceV2, myshell-ai/OpenVoiceV2, phenixrhyder/OpenVoice-freeAiVoice, saneowl/OpenVoiceV2, 
soiz1/seed-vc3, vuxuanhoan/OpenVoiceV2",13
|
| 119 |
+
rsxdalv/OpenVoiceV2,"---
|
| 120 |
+
license: mit
|
| 121 |
+
language:
|
| 122 |
+
- en
|
| 123 |
+
base_model:
|
| 124 |
+
- myshell-ai/OpenVoiceV2
|
| 125 |
+
---
|
| 126 |
+
<div align=""center"">
|
| 127 |
+
<div> </div>
|
| 128 |
+
<img src=""resources/openvoicelogo.jpg"" width=""400""/>
|
| 129 |
+
|
| 130 |
+
[Paper](https://arxiv.org/abs/2312.01479) |
|
| 131 |
+
[Website](https://research.myshell.ai/open-voice) <br> <br>
|
| 132 |
+
<a href=""https://trendshift.io/repositories/6161"" target=""_blank""><img src=""https://trendshift.io/api/badge/repositories/6161"" alt=""myshell-ai%2FOpenVoice | Trendshift"" style=""width: 250px; height: 55px;"" width=""250"" height=""55""/></a>
|
| 133 |
+
</div>
|
| 134 |
+
|
| 135 |
+
## Introduction
|
| 136 |
+
|
| 137 |
+
### OpenVoice V1
|
| 138 |
+
|
| 139 |
+
As we detailed in our [paper](https://arxiv.org/abs/2312.01479) and [website](https://research.myshell.ai/open-voice), the advantages of OpenVoice are three-fold:
|
| 140 |
+
|
| 141 |
+
**1. Accurate Tone Color Cloning.**
|
| 142 |
+
OpenVoice can accurately clone the reference tone color and generate speech in multiple languages and accents.
|
| 143 |
+
|
| 144 |
+
**2. Flexible Voice Style Control.**
|
| 145 |
+
OpenVoice enables granular control over voice styles, such as emotion and accent, as well as other style parameters including rhythm, pauses, and intonation.
|
| 146 |
+
|
| 147 |
+
**3. Zero-shot Cross-lingual Voice Cloning.**
|
| 148 |
+
Neither of the language of the generated speech nor the language of the reference speech needs to be presented in the massive-speaker multi-lingual training dataset.
|
| 149 |
+
|
| 150 |
+
### OpenVoice V2
|
| 151 |
+
|
| 152 |
+
In April 2024, we released OpenVoice V2, which includes all features in V1 and has:
|
| 153 |
+
|
| 154 |
+
**1. Better Audio Quality.**
|
| 155 |
+
OpenVoice V2 adopts a different training strategy that delivers better audio quality.
|
| 156 |
+
|
| 157 |
+
**2. Native Multi-lingual Support.**
|
| 158 |
+
English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2.
|
| 159 |
+
|
| 160 |
+
**3. Free Commercial Use.**
|
| 161 |
+
Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
|
| 162 |
+
|
| 163 |
+
[Video](https://github.com/myshell-ai/OpenVoice/assets/40556743/3cba936f-82bf-476c-9e52-09f0f417bb2f)
|
| 164 |
+
|
| 165 |
+
OpenVoice has been powering the instant voice cloning capability of [myshell.ai](https://app.myshell.ai/explore) since May 2023. Until Nov 2023, the voice cloning model has been used tens of millions of times by users worldwide, and witnessed the explosive user growth on the platform.
|
| 166 |
+
|
| 167 |
+
## Main Contributors
|
| 168 |
+
|
| 169 |
+
- [Zengyi Qin](https://www.qinzy.tech) at MIT
|
| 170 |
+
- [Wenliang Zhao](https://wl-zhao.github.io) at Tsinghua University
|
| 171 |
+
- [Xumin Yu](https://yuxumin.github.io) at Tsinghua University
|
| 172 |
+
- [Ethan Sun](https://twitter.com/ethan_myshell) at MyShell
|
| 173 |
+
|
| 174 |
+
## How to Use
|
| 175 |
+
Please see [usage](docs/USAGE.md) for detailed instructions.
|
| 176 |
+
|
| 177 |
+
## Common Issues
|
| 178 |
+
|
| 179 |
+
Please see [QA](docs/QA.md) for common questions and answers. We will regularly update the question and answer list.
|
| 180 |
+
|
| 181 |
+
## Citation
|
| 182 |
+
```
|
| 183 |
+
@article{qin2023openvoice,
|
| 184 |
+
title={OpenVoice: Versatile Instant Voice Cloning},
|
| 185 |
+
author={Qin, Zengyi and Zhao, Wenliang and Yu, Xumin and Sun, Xin},
|
| 186 |
+
journal={arXiv preprint arXiv:2312.01479},
|
| 187 |
+
year={2023}
|
| 188 |
+
}
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
## License
|
| 192 |
+
OpenVoice V1 and V2 are MIT Licensed. Free for both commercial and research use.
|
| 193 |
+
|
| 194 |
+
## Acknowledgements
|
| 195 |
+
This implementation is based on several excellent projects, [TTS](https://github.com/coqui-ai/TTS), [VITS](https://github.com/jaywalnut310/vits), and [VITS2](https://github.com/daniilrobnikov/vits2). Thanks for their awesome work!","{""id"": ""rsxdalv/OpenVoiceV2"", ""author"": ""rsxdalv"", ""sha"": ""2f3d8e1ec89cbe7469f1dedcb5d521420d71893a"", ""last_modified"": ""2025-04-25 09:25:10+00:00"", ""created_at"": ""2025-04-25 09:21:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""arxiv:2312.01479"", ""base_model:myshell-ai/OpenVoiceV2"", ""base_model:finetune:myshell-ai/OpenVoiceV2"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- myshell-ai/OpenVoiceV2\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-au.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-br.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-default.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-india.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-newest.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/en-us.pth', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/es.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/fr.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/jp.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/kr.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/base_speakers/ses/zh.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/converter/checkpoint.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints_v2/converter/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/demo_speaker0.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/demo_speaker1.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/demo_speaker2.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/example_reference.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/framework-ipa.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/huggingface.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/lepton-hd.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/myshell-hd.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/openvoicelogo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/tts-guide.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/voice-clone-guide.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-25 09:25:10+00:00"", ""cardData"": ""base_model:\n- myshell-ai/OpenVoiceV2\nlanguage:\n- en\nlicense: mit"", 
""transformersInfo"": null, ""_id"": ""680b542ccbada0176f5fa82f"", ""modelId"": ""rsxdalv/OpenVoiceV2"", ""usedStorage"": 134494172}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=rsxdalv/OpenVoiceV2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brsxdalv%2FOpenVoiceV2%5D(%2Frsxdalv%2FOpenVoiceV2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Phi-3-small-8k-instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Qwen-14B-Chat_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,792 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Qwen/Qwen-14B-Chat,"---
|
| 3 |
+
language:
|
| 4 |
+
- zh
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- qwen
|
| 8 |
+
pipeline_tag: text-generation
|
| 9 |
+
inference: false
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# Qwen-14B-Chat
|
| 13 |
+
|
| 14 |
+
<p align=""center"">
|
| 15 |
+
<img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
|
| 16 |
+
<p>
|
| 17 |
+
<br>
|
| 18 |
+
|
| 19 |
+
<p align=""center"">
|
| 20 |
+
🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>   |   🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>   |    📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a>    |   🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-14B-Chat-Demo/summary"">Demo</a>
|
| 21 |
+
<br>
|
| 22 |
+
<a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>   |   <a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>   |   <a href=""https://dashscope.aliyun.com"">API</a>
|
| 23 |
+
</p>
|
| 24 |
+
<br>
|
| 25 |
+
|
| 26 |
+
## 介绍(Introduction)
|
| 27 |
+
|
| 28 |
+
**通义千问-14B(Qwen-14B)**是阿里云研发的通义千问大模型系列的140亿参数规模的模型。Qwen-14B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-14B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-14B-Chat。本仓库为Qwen-14B-Chat的仓库。
|
| 29 |
+
|
| 30 |
+
如果您想了解更多关于通义千问-14B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
|
| 31 |
+
|
| 32 |
+
**Qwen-14B** is the 14B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-14B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-14B, we release Qwen-14B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-14B-Chat.
|
| 33 |
+
|
| 34 |
+
For more details about the open-source model of Qwen-14B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
|
| 35 |
+
<br>
|
| 36 |
+
|
| 37 |
+
## 要求(Requirements)
|
| 38 |
+
|
| 39 |
+
* python 3.8及以上版本
|
| 40 |
+
* pytorch 1.12及以上版本,推荐2.0及以上版本
|
| 41 |
+
* 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
|
| 42 |
+
* python 3.8 and above
|
| 43 |
+
* pytorch 1.12 and above, 2.0 and above are recommended
|
| 44 |
+
* CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
|
| 45 |
+
<br>
|
| 46 |
+
|
| 47 |
+
## 依赖项(Dependency)
|
| 48 |
+
|
| 49 |
+
运行Qwen-14B-Chat,请确保满足上述要求,再执行以下pip命令安装依赖库
|
| 50 |
+
|
| 51 |
+
To run Qwen-14B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
|
| 52 |
+
|
| 53 |
+
```bash
|
| 54 |
+
pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
|
| 58 |
+
|
| 59 |
+
In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
git clone https://github.com/Dao-AILab/flash-attention
|
| 63 |
+
cd flash-attention && pip install .
|
| 64 |
+
# 下方安装可选,安装可能比较缓慢。
|
| 65 |
+
# pip install csrc/layer_norm
|
| 66 |
+
# pip install csrc/rotary
|
| 67 |
+
```
|
| 68 |
+
<br>
|
| 69 |
+
|
| 70 |
+
## 快速使用(Quickstart)
|
| 71 |
+
|
| 72 |
+
下面我们展示了一个使用Qwen-14B-Chat模型,进行多轮对话交互的样例:
|
| 73 |
+
|
| 74 |
+
We show an example of multi-turn interaction with Qwen-14B-Chat in the following code:
|
| 75 |
+
|
| 76 |
+
```python
|
| 77 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 78 |
+
from transformers.generation import GenerationConfig
|
| 79 |
+
|
| 80 |
+
# Note: The default behavior now has injection attack prevention off.
|
| 81 |
+
tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-14B-Chat"", trust_remote_code=True)
|
| 82 |
+
|
| 83 |
+
# use bf16
|
| 84 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B-Chat"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
|
| 85 |
+
# use fp16
|
| 86 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B-Chat"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
|
| 87 |
+
# use cpu only
|
| 88 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B-Chat"", device_map=""cpu"", trust_remote_code=True).eval()
|
| 89 |
+
# use auto mode, automatically select precision based on the device.
|
| 90 |
+
model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-14B-Chat"", device_map=""auto"", trust_remote_code=True).eval()
|
| 91 |
+
|
| 92 |
+
# Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
|
| 93 |
+
# model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-14B-Chat"", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
|
| 94 |
+
|
| 95 |
+
# 第一���对话 1st dialogue turn
|
| 96 |
+
response, history = model.chat(tokenizer, ""你好"", history=None)
|
| 97 |
+
print(response)
|
| 98 |
+
# 你好!很高兴为你提供帮助。
|
| 99 |
+
|
| 100 |
+
# 第二轮对话 2nd dialogue turn
|
| 101 |
+
response, history = model.chat(tokenizer, ""给我讲一个年轻人奋斗创业最终取得成功的故事。"", history=history)
|
| 102 |
+
print(response)
|
| 103 |
+
# 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
|
| 104 |
+
# 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。
|
| 105 |
+
# 为了实现这个目标,李明勤奋学习,考上了大学。在大学期间,他积极参加各种创业比赛,获得了不少奖项。他还利用课余时间去实习,积累了宝贵的经验。
|
| 106 |
+
# 毕业后,李明决定开始自己的创业之路。他开始寻找投资机会,但多次都被拒绝了。然而,他并没有放弃。他继续努力,不断改进自己的创业计划,并寻找新的投资机会。
|
| 107 |
+
# 最终,李明成功地获得了一笔投资,开始了自己的创业之路。他成立了一家科技公司,专注于开发新型软件。在他的领导下,公司迅速发展起来,成为了一家成功的科技企业。
|
| 108 |
+
# 李明的成功并不是偶然的。他勤奋、坚韧、勇于冒险,不断学习和改进自己。他的成功也证明了,只要努力奋斗,任何人都有可能取得成功。
|
| 109 |
+
|
| 110 |
+
# 第三轮对话 3rd dialogue turn
|
| 111 |
+
response, history = model.chat(tokenizer, ""给这个故事起一个标题"", history=history)
|
| 112 |
+
print(response)
|
| 113 |
+
# 《奋斗创业:一个年轻人的成功之路》
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
|
| 117 |
+
|
| 118 |
+
For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information.
|
| 119 |
+
<br>
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
## 量化 (Quantization)
|
| 123 |
+
|
| 124 |
+
### 用法 (Usage)
|
| 125 |
+
|
| 126 |
+
**请注意:我们更新量化方案为基于[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)的量化,提供Qwen-14B-Chat的Int4量化模型[点击这里](https://huggingface.co/Qwen/Qwen-14B-Chat-Int4)。相比此前方案,该方案在模型评测效果几乎无损,且存储需求更低,推理速度更优。**
|
| 127 |
+
|
| 128 |
+
**Note: we provide a new solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release an Int4 quantized model for Qwen-14B-Chat [Click here](https://huggingface.co/Qwen/Qwen-14B-Chat-Int4), which achieves nearly lossless model effects but improved performance on both memory costs and inference speed, in comparison with the previous solution.**
|
| 129 |
+
|
| 130 |
+
以下我们提供示例说明如何使用Int4量化模型。在开始使用前,请先保证满足要求(如torch 2.0及以上,transformers版本为4.32.0及以上,等等),并安装所需安装包:
|
| 131 |
+
|
| 132 |
+
Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:
|
| 133 |
+
|
| 134 |
+
```bash
|
| 135 |
+
pip install auto-gptq optimum
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。
|
| 139 |
+
|
| 140 |
+
随后即可使用和上述一致的用法调用量化模型:
|
| 141 |
+
|
| 142 |
+
If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a pre-build wheel.
|
| 143 |
+
|
| 144 |
+
Then you can load the quantized model easily and run inference as same as usual:
|
| 145 |
+
|
| 146 |
+
```python
|
| 147 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 148 |
+
""Qwen/Qwen-14B-Chat-Int4"",
|
| 149 |
+
device_map=""auto"",
|
| 150 |
+
trust_remote_code=True
|
| 151 |
+
).eval()
|
| 152 |
+
response, history = model.chat(tokenizer, ""你好"", history=None)
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
### 效果评测
|
| 158 |
+
|
| 159 |
+
我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),发现量化模型效果损失较小,结果如下所示:
|
| 160 |
+
|
| 161 |
+
We illustrate the zero-shot performance of both BF16, Int8 and Int4 models on the benchmark, and we find that the quantized model does not suffer from significant performance degradation. Results are shown below:
|
| 162 |
+
|
| 163 |
+
| Quantization | MMLU | CEval (val) | GSM8K | Humaneval |
|
| 164 |
+
|--------------|:----:|:-----------:|:-----:|:---------:|
|
| 165 |
+
| BF16 | 64.6 | 69.8 | 60.1 | 43.9 |
|
| 166 |
+
| Int8 | 63.6 | 68.6 | 60.0 | 48.2 |
|
| 167 |
+
| Int4 | 63.3 | 69.0 | 59.8 | 45.7 |
|
| 168 |
+
|
| 169 |
+
### 推理速度 (Inference Speed)
|
| 170 |
+
|
| 171 |
+
我们测算了不同精度模型以及不同FlashAttn库版本下模型生成2048和8192个token的平均推理速度。如图所示:
|
| 172 |
+
|
| 173 |
+
We measured the average inference speed of generating 2048 and 8192 tokens with different quantization levels and versions of flash-attention, respectively.
|
| 174 |
+
|
| 175 |
+
| Quantization | FlashAttn | Speed (2048 tokens) | Speed (8192 tokens) |
|
| 176 |
+
| ------------- | :-------: | :------------------:| :------------------:|
|
| 177 |
+
| BF16 | v2 | 32.88 | 24.87 |
|
| 178 |
+
| Int8 | v2 | 29.28 | 24.22 |
|
| 179 |
+
| Int4 | v2 | 38.72 | 27.33 |
|
| 180 |
+
| BF16 | v1 | 32.76 | 28.89 |
|
| 181 |
+
| Int8 | v1 | 28.31 | 23.87 |
|
| 182 |
+
| Int4 | v1 | 37.81 | 26.46 |
|
| 183 |
+
| BF16 | Disabled | 29.32 | 22.91 |
|
| 184 |
+
| Int8 | Disabled | 31.12 | 24.60 |
|
| 185 |
+
| Int4 | Disabled | 37.65 | 26.00 |
|
| 186 |
+
|
| 187 |
+
具体而言,我们记录在长度为1的上下文的条件下生成8192个token的性能。评测运行于单张A100-SXM4-80G GPU,使用PyTorch 2.0.1和CUDA 11.8。推理速度是生成8192个token的速度均值。
|
| 188 |
+
|
| 189 |
+
In detail, the setting of profiling is generating 8192 new tokens with 1 context token. The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.8. The inference speed is averaged over the generated 8192 tokens.
|
| 190 |
+
|
| 191 |
+
注意:以上Int4/Int8模型生成速度使用autogptq库给出,当前``AutoModelForCausalLM.from_pretrained``载入的模型生成速度会慢大约20%。我们已经将该问题汇报给HuggingFace团队,若有解决方案将即时更新。
|
| 192 |
+
|
| 193 |
+
Note: The generation speed of the Int4/Int8 models mentioned above is provided by the autogptq library. The current speed of the model loaded using ""AutoModelForCausalLM.from_pretrained"" will be approximately 20% slower. We have reported this issue to the HuggingFace team and will update it promptly if a solution is available.
|
| 194 |
+
|
| 195 |
+
### 显存使用 (GPU Memory Usage)
|
| 196 |
+
|
| 197 |
+
我们还测算了不同模型精度编码2048个token及生成8192个token的峰值显存占用情况。(显存消耗在是否使用FlashAttn的情况下均类似。)结果如下所示:
|
| 198 |
+
|
| 199 |
+
We also profile the peak GPU memory usage for encoding 2048 tokens as context (and generating single token) and generating 8192 tokens (with single token as context) under different quantization levels, respectively. (The GPU memory usage is similar when using flash-attention or not.)The results are shown below.
|
| 200 |
+
|
| 201 |
+
| Quantization Level | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens |
|
| 202 |
+
| ------------------ | :---------------------------------: | :-----------------------------------: |
|
| 203 |
+
| BF16 | 30.15GB | 38.94GB |
|
| 204 |
+
| Int8 | 18.81GB | 27.54GB |
|
| 205 |
+
| Int4 | 13.01GB | 21.79GB |
|
| 206 |
+
|
| 207 |
+
上述性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。
|
| 208 |
+
|
| 209 |
+
The above speed and memory profiling are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py).
|
| 210 |
+
<br>
|
| 211 |
+
|
| 212 |
+
## 模型细节(Model)
|
| 213 |
+
|
| 214 |
+
与Qwen-14B预训练模型相同,Qwen-14B-Chat模型规模基本情况如下所示
|
| 215 |
+
|
| 216 |
+
The details of the model architecture of Qwen-14B-Chat are listed as follows
|
| 217 |
+
|
| 218 |
+
| Hyperparameter | Value |
|
| 219 |
+
|:----------------|:------:|
|
| 220 |
+
| n_layers | 40 |
|
| 221 |
+
| n_heads | 40 |
|
| 222 |
+
| d_model | 5120 |
|
| 223 |
+
| vocab size | 151851 |
|
| 224 |
+
| sequence length | 2048 |
|
| 225 |
+
|
| 226 |
+
在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
|
| 227 |
+
即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
|
| 228 |
+
|
| 229 |
+
在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-14B-Chat使用了约15万token大小的词表。
|
| 230 |
+
该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。
|
| 231 |
+
词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
|
| 232 |
+
|
| 233 |
+
For position encoding, FFN activation function, and normalization calculation methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
|
| 234 |
+
|
| 235 |
+
For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-14B-Chat uses a vocabulary of over 150K tokens.
|
| 236 |
+
It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary.
|
| 237 |
+
It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
|
| 238 |
+
<br>
|
| 239 |
+
|
| 240 |
+
## 评测效果(Evaluation)
|
| 241 |
+
|
| 242 |
+
对于Qwen-14B-Chat模型,我们同样评测了常规的中文理解(C-Eval)、英文理解(MMLU)、代码(HumanEval)和数学(GSM8K)等权威任务,同时包含了长序列任务的评测结果。由于Qwen-14B-Chat模型经过对齐后,激发了较强的外部系统调用能力,我们还进行了工具使用能力方面的评测。
|
| 243 |
+
|
| 244 |
+
提示:由于硬件和框架造成的舍入误差,复现结果如有波动属于正常现象。
|
| 245 |
+
|
| 246 |
+
For Qwen-14B-Chat, we also evaluate the model on C-Eval, MMLU, HumanEval, GSM8K, etc., as well as the benchmark evaluation for long-context understanding, and tool usage.
|
| 247 |
+
|
| 248 |
+
Note: Due to rounding errors caused by hardware and framework, differences in reproduced results are possible.
|
| 249 |
+
|
| 250 |
+
### 中文评测(Chinese Evaluation)
|
| 251 |
+
|
| 252 |
+
#### C-Eval
|
| 253 |
+
|
| 254 |
+
在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-14B-Chat模型的0-shot & 5-shot准确率
|
| 255 |
+
|
| 256 |
+
We demonstrate the 0-shot & 5-shot accuracy of Qwen-14B-Chat on C-Eval validation set
|
| 257 |
+
|
| 258 |
+
| Model | Avg. Acc. |
|
| 259 |
+
|:--------------------------------:|:---------:|
|
| 260 |
+
| LLaMA2-7B-Chat | 31.9 |
|
| 261 |
+
| LLaMA2-13B-Chat | 36.2 |
|
| 262 |
+
| LLaMA2-70B-Chat | 44.3 |
|
| 263 |
+
| ChatGLM2-6B-Chat | 52.6 |
|
| 264 |
+
| InternLM-7B-Chat | 53.6 |
|
| 265 |
+
| Baichuan2-7B-Chat | 55.6 |
|
| 266 |
+
| Baichuan2-13B-Chat | 56.7 |
|
| 267 |
+
| Qwen-7B-Chat (original) (0-shot) | 54.2 |
|
| 268 |
+
| **Qwen-7B-Chat (0-shot)** | 59.7 |
|
| 269 |
+
| **Qwen-7B-Chat (5-shot)** | 59.3 |
|
| 270 |
+
| **Qwen-14B-Chat (0-shot)** | 69.8 |
|
| 271 |
+
| **Qwen-14B-Chat (5-shot)** | **71.7** |
|
| 272 |
+
|
| 273 |
+
C-Eval测试集上,Qwen-14B-Chat模型的zero-shot准确率结果如下:
|
| 274 |
+
|
| 275 |
+
The zero-shot accuracy of Qwen-14B-Chat on C-Eval testing set is provided below:
|
| 276 |
+
|
| 277 |
+
| Model | Avg. | STEM | Social Sciences | Humanities | Others |
|
| 278 |
+
| :---------------------- | :------: | :--: | :-------------: | :--------: | :----: |
|
| 279 |
+
| Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 |
|
| 280 |
+
| Chinese-Alpaca-2-7B | 40.3 | - | - | - | - |
|
| 281 |
+
| ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 |
|
| 282 |
+
| Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 |
|
| 283 |
+
| Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 |
|
| 284 |
+
| **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 |
|
| 285 |
+
| **Qwen-14B-Chat** | **69.1** | 65.1 | 80.9 | 71.2 | 63.4 |
|
| 286 |
+
|
| 287 |
+
在14B规模模型上,经过人类指令对齐的Qwen-14B-Chat模型,准确率在同类相近规模模型中仍然处于前列。
|
| 288 |
+
|
| 289 |
+
Compared with other pretrained models with comparable model size, the human-aligned Qwen-14B-Chat performs well in C-Eval accuracy.
|
| 290 |
+
|
| 291 |
+
### 英文评测(English Evaluation)
|
| 292 |
+
|
| 293 |
+
#### MMLU
|
| 294 |
+
|
| 295 |
+
[MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-14B-Chat模型的 0-shot & 5-shot 准确率如下,效果同样在同类对齐模型中同样表现较优。
|
| 296 |
+
|
| 297 |
+
The 0-shot & 5-shot accuracy of Qwen-14B-Chat on MMLU is provided below.
|
| 298 |
+
The performance of Qwen-14B-Chat is still among the top compared with other human-aligned models of comparable size.
|
| 299 |
+
|
| 300 |
+
| Model | Avg. Acc. |
|
| 301 |
+
|:--------------------------------:|:---------:|
|
| 302 |
+
| ChatGLM2-6B-Chat | 46.0 |
|
| 303 |
+
| LLaMA2-7B-Chat | 46.2 |
|
| 304 |
+
| InternLM-7B-Chat | 51.1 |
|
| 305 |
+
| Baichuan2-7B-Chat | 52.9 |
|
| 306 |
+
| LLaMA2-13B-Chat | 54.6 |
|
| 307 |
+
| Baichuan2-13B-Chat | 57.3 |
|
| 308 |
+
| LLaMA2-70B-Chat | 63.8 |
|
| 309 |
+
| Qwen-7B-Chat (original) (0-shot) | 53.9 |
|
| 310 |
+
| **Qwen-7B-Chat (0-shot)** | 55.8 |
|
| 311 |
+
| **Qwen-7B-Chat (5-shot)** | 57.0 |
|
| 312 |
+
| **Qwen-14B-Chat (0-shot)** | 64.6 |
|
| 313 |
+
| **Qwen-14B-Chat (5-shot)** | **66.5** |
|
| 314 |
+
|
| 315 |
+
### 代码评测(Coding Evaluation)
|
| 316 |
+
|
| 317 |
+
Qwen-14B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pass@1效果如下
|
| 318 |
+
|
| 319 |
+
The zero-shot Pass@1 of Qwen-14B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below
|
| 320 |
+
|
| 321 |
+
| Model | Pass@1 |
|
| 322 |
+
|:-----------------------:|:--------:|
|
| 323 |
+
| ChatGLM2-6B-Chat | 11.0 |
|
| 324 |
+
| LLaMA2-7B-Chat | 12.2 |
|
| 325 |
+
| InternLM-7B-Chat | 14.6 |
|
| 326 |
+
| Baichuan2-7B-Chat | 13.4 |
|
| 327 |
+
| LLaMA2-13B-Chat | 18.9 |
|
| 328 |
+
| Baichuan2-13B-Chat | 17.7 |
|
| 329 |
+
| LLaMA2-70B-Chat | 32.3 |
|
| 330 |
+
| Qwen-7B-Chat (original) | 24.4 |
|
| 331 |
+
| **Qwen-7B-Chat** | 37.2 |
|
| 332 |
+
| **Qwen-14B-Chat** | **43.9** |
|
| 333 |
+
|
| 334 |
+
### 数学评测(Mathematics Evaluation)
|
| 335 |
+
|
| 336 |
+
在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-14B-Chat的准确率结果如下
|
| 337 |
+
|
| 338 |
+
The accuracy of Qwen-14B-Chat on GSM8K is shown below
|
| 339 |
+
|
| 340 |
+
| Model | Acc. |
|
| 341 |
+
|:--------------------------------:|:--------:|
|
| 342 |
+
| LLaMA2-7B-Chat | 26.3 |
|
| 343 |
+
| ChatGLM2-6B-Chat | 28.8 |
|
| 344 |
+
| Baichuan2-7B-Chat | 32.8 |
|
| 345 |
+
| InternLM-7B-Chat | 33.0 |
|
| 346 |
+
| LLaMA2-13B-Chat | 37.1 |
|
| 347 |
+
| Baichuan2-13B-Chat | 55.3 |
|
| 348 |
+
| LLaMA2-70B-Chat | 59.3 |
|
| 349 |
+
| Qwen-7B-Chat (original) (0-shot) | 41.1 |
|
| 350 |
+
| **Qwen-7B-Chat (0-shot)** | 50.3 |
|
| 351 |
+
| **Qwen-7B-Chat (8-shot)** | 54.1 |
|
| 352 |
+
| **Qwen-14B-Chat (0-shot)** | **60.1** |
|
| 353 |
+
| **Qwen-14B-Chat (8-shot)** | 59.3 |
|
| 354 |
+
|
| 355 |
+
### 长序列评测(Long-Context Understanding)
|
| 356 |
+
|
| 357 |
+
通过NTK插值,LogN注意力缩放可以扩展Qwen-14B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-14B-Chat的Rouge-L结果如下:
|
| 358 |
+
|
| 359 |
+
**(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**
|
| 360 |
+
|
| 361 |
+
We introduce NTK-aware interpolation, LogN attention scaling to extend the context length of Qwen-14B-Chat. The Rouge-L results of Qwen-14B-Chat on long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (The average length of this dataset is around 15K) are shown below:
|
| 362 |
+
|
| 363 |
+
**(To use these tricks, please set `use_dynamic_ntk` and `use_logn_attn` to true in config.json.)**
|
| 364 |
+
|
| 365 |
+
| Model | VCSUM (zh) |
|
| 366 |
+
|:------------------|:----------:|
|
| 367 |
+
| GPT-3.5-Turbo-16k | 16.0 |
|
| 368 |
+
| LLama2-7B-Chat | 0.2 |
|
| 369 |
+
| InternLM-7B-Chat | 13.0 |
|
| 370 |
+
| ChatGLM2-6B-Chat | 16.3 |
|
| 371 |
+
| **Qwen-14B-Chat** | **17.3** |
|
| 372 |
+
|
| 373 |
+
### 工具使用能力的评测(Tool Usage)
|
| 374 |
+
|
| 375 |
+
#### ReAct Prompting
|
| 376 |
+
|
| 377 |
+
千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在我们开源的、用于评估工具使用能力的评测基准上,千问的表现如下:
|
| 378 |
+
|
| 379 |
+
Qwen-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework. In our evaluation benchmark for assessing tool usage capabilities, Qwen-Chat's performance is as follows:
|
| 380 |
+
|
| 381 |
+
<table>
|
| 382 |
+
<tr>
|
| 383 |
+
<th colspan=""4"" align=""center"">Chinese Tool-Use Benchmark</th>
|
| 384 |
+
</tr>
|
| 385 |
+
<tr>
|
| 386 |
+
<th align=""center"">Model</th><th align=""center"">Tool Selection (Acc.↑)</th><th align=""center"">Tool Input (Rouge-L↑)</th><th align=""center"">False Positive Error↓</th>
|
| 387 |
+
</tr>
|
| 388 |
+
<tr>
|
| 389 |
+
<td>GPT-4</td><td align=""center"">95%</td><td align=""center"">0.90</td><td align=""center"">15.0%</td>
|
| 390 |
+
</tr>
|
| 391 |
+
<tr>
|
| 392 |
+
<td>GPT-3.5</td><td align=""center"">85%</td><td align=""center"">0.88</td><td align=""center"">75.0%</td>
|
| 393 |
+
</tr>
|
| 394 |
+
<tr>
|
| 395 |
+
<td>Qwen-7B-Chat</td><td align=""center"">98%</td><td align=""center"">0.91</td><td align=""center"">7.3%</td>
|
| 396 |
+
</tr>
|
| 397 |
+
<tr>
|
| 398 |
+
<td>Qwen-14B-Chat</td><td align=""center"">98%</td><td align=""center"">0.93</td><td align=""center"">2.4%</td>
|
| 399 |
+
</tr>
|
| 400 |
+
</table>
|
| 401 |
+
|
| 402 |
+
> 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。
|
| 403 |
+
|
| 404 |
+
> The plugins that appear in the evaluation set do not appear in the training set of Qwen. This benchmark evaluates the accuracy of the model in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed into the plugin, and the false positive rate. False Positive: Incorrectly invoking a plugin when it should not have been called when responding to a query.
|
| 405 |
+
|
| 406 |
+

|
| 407 |
+

|
| 408 |
+
|
| 409 |
+
#### Code Interpreter
|
| 410 |
+
|
| 411 |
+
为了考察Qwen使用Python Code Interpreter完成数学解题、数据可视化、及文件处理与爬虫等任务的能力,我们专门建设并开源了一个评测这方面能力的[评测基准](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark)。
|
| 412 |
+
|
| 413 |
+
我们发现Qwen在生成代码的可执行率、结果正确性上均表现较好:
|
| 414 |
+
|
| 415 |
+
To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark).
|
| 416 |
+
|
| 417 |
+
We have observed that Qwen performs well in terms of code executability and result accuracy when generating code:
|
| 418 |
+
|
| 419 |
+
<table>
|
| 420 |
+
<tr>
|
| 421 |
+
<th colspan=""4"" align=""center"">Executable Rate of Generated Code (%)</th>
|
| 422 |
+
</tr>
|
| 423 |
+
<tr>
|
| 424 |
+
<th align=""center"">Model</th><th align=""center"">Math↑</th><th align=""center"">Visualization↑</th><th align=""center"">General↑</th>
|
| 425 |
+
</tr>
|
| 426 |
+
<tr>
|
| 427 |
+
<td>GPT-4</td><td align=""center"">91.9</td><td align=""center"">85.9</td><td align=""center"">82.8</td>
|
| 428 |
+
</tr>
|
| 429 |
+
<tr>
|
| 430 |
+
<td>GPT-3.5</td><td align=""center"">89.2</td><td align=""center"">65.0</td><td align=""center"">74.1</td>
|
| 431 |
+
</tr>
|
| 432 |
+
<tr>
|
| 433 |
+
<td>LLaMA2-7B-Chat</td>
|
| 434 |
+
<td align=""center"">41.9</td>
|
| 435 |
+
<td align=""center"">33.1</td>
|
| 436 |
+
<td align=""center"">24.1 </td>
|
| 437 |
+
</tr>
|
| 438 |
+
<tr>
|
| 439 |
+
<td>LLaMA2-13B-Chat</td>
|
| 440 |
+
<td align=""center"">50.0</td>
|
| 441 |
+
<td align=""center"">40.5</td>
|
| 442 |
+
<td align=""center"">48.3 </td>
|
| 443 |
+
</tr>
|
| 444 |
+
<tr>
|
| 445 |
+
<td>CodeLLaMA-7B-Instruct</td>
|
| 446 |
+
<td align=""center"">85.1</td>
|
| 447 |
+
<td align=""center"">54.0</td>
|
| 448 |
+
<td align=""center"">70.7 </td>
|
| 449 |
+
</tr>
|
| 450 |
+
<tr>
|
| 451 |
+
<td>CodeLLaMA-13B-Instruct</td>
|
| 452 |
+
<td align=""center"">93.2</td>
|
| 453 |
+
<td align=""center"">55.8</td>
|
| 454 |
+
<td align=""center"">74.1 </td>
|
| 455 |
+
</tr>
|
| 456 |
+
<tr>
|
| 457 |
+
<td>InternLM-7B-Chat-v1.1</td>
|
| 458 |
+
<td align=""center"">78.4</td>
|
| 459 |
+
<td align=""center"">44.2</td>
|
| 460 |
+
<td align=""center"">62.1 </td>
|
| 461 |
+
</tr>
|
| 462 |
+
<tr>
|
| 463 |
+
<td>InternLM-20B-Chat</td>
|
| 464 |
+
<td align=""center"">70.3</td>
|
| 465 |
+
<td align=""center"">44.2</td>
|
| 466 |
+
<td align=""center"">65.5 </td>
|
| 467 |
+
</tr>
|
| 468 |
+
<tr>
|
| 469 |
+
<td>Qwen-7B-Chat</td>
|
| 470 |
+
<td align=""center"">82.4</td>
|
| 471 |
+
<td align=""center"">64.4</td>
|
| 472 |
+
<td align=""center"">67.2 </td>
|
| 473 |
+
</tr>
|
| 474 |
+
<tr>
|
| 475 |
+
<td>Qwen-14B-Chat</td>
|
| 476 |
+
<td align=""center"">89.2</td>
|
| 477 |
+
<td align=""center"">84.1</td>
|
| 478 |
+
<td align=""center"">65.5</td>
|
| 479 |
+
</tr>
|
| 480 |
+
</table>
|
| 481 |
+
|
| 482 |
+
<table>
|
| 483 |
+
<tr>
|
| 484 |
+
<th colspan=""4"" align=""center"">Accuracy of Code Execution Results (%)</th>
|
| 485 |
+
</tr>
|
| 486 |
+
<tr>
|
| 487 |
+
<th align=""center"">Model</th><th align=""center"">Math↑</th><th align=""center"">Visualization-Hard↑</th><th align=""center"">Visualization-Easy↑</th>
|
| 488 |
+
</tr>
|
| 489 |
+
<tr>
|
| 490 |
+
<td>GPT-4</td><td align=""center"">82.8</td><td align=""center"">66.7</td><td align=""center"">60.8</td>
|
| 491 |
+
</tr>
|
| 492 |
+
<tr>
|
| 493 |
+
<td>GPT-3.5</td><td align=""center"">47.3</td><td align=""center"">33.3</td><td align=""center"">55.7</td>
|
| 494 |
+
</tr>
|
| 495 |
+
<tr>
|
| 496 |
+
<td>LLaMA2-7B-Chat</td>
|
| 497 |
+
<td align=""center"">3.9</td>
|
| 498 |
+
<td align=""center"">14.3</td>
|
| 499 |
+
<td align=""center"">39.2 </td>
|
| 500 |
+
</tr>
|
| 501 |
+
<tr>
|
| 502 |
+
<td>LLaMA2-13B-Chat</td>
|
| 503 |
+
<td align=""center"">8.3</td>
|
| 504 |
+
<td align=""center"">8.3</td>
|
| 505 |
+
<td align=""center"">40.5 </td>
|
| 506 |
+
</tr>
|
| 507 |
+
<tr>
|
| 508 |
+
<td>CodeLLaMA-7B-Instruct</td>
|
| 509 |
+
<td align=""center"">14.3</td>
|
| 510 |
+
<td align=""center"">26.2</td>
|
| 511 |
+
<td align=""center"">60.8 </td>
|
| 512 |
+
</tr>
|
| 513 |
+
<tr>
|
| 514 |
+
<td>CodeLLaMA-13B-Instruct</td>
|
| 515 |
+
<td align=""center"">28.2</td>
|
| 516 |
+
<td align=""center"">27.4</td>
|
| 517 |
+
<td align=""center"">62.0 </td>
|
| 518 |
+
</tr>
|
| 519 |
+
<tr>
|
| 520 |
+
<td>InternLM-7B-Chat-v1.1</td>
|
| 521 |
+
<td align=""center"">28.5</td>
|
| 522 |
+
<td align=""center"">4.8</td>
|
| 523 |
+
<td align=""center"">40.5 </td>
|
| 524 |
+
</tr>
|
| 525 |
+
<tr>
|
| 526 |
+
<td>InternLM-20B-Chat</td>
|
| 527 |
+
<td align=""center"">34.6</td>
|
| 528 |
+
<td align=""center"">21.4</td>
|
| 529 |
+
<td align=""center"">45.6 </td>
|
| 530 |
+
</tr>
|
| 531 |
+
<tr>
|
| 532 |
+
<td>Qwen-7B-Chat</td>
|
| 533 |
+
<td align=""center"">41.9</td>
|
| 534 |
+
<td align=""center"">40.5</td>
|
| 535 |
+
<td align=""center"">54.4 </td>
|
| 536 |
+
</tr>
|
| 537 |
+
<tr>
|
| 538 |
+
<td>Qwen-14B-Chat</td>
|
| 539 |
+
<td align=""center"">58.4</td>
|
| 540 |
+
<td align=""center"">53.6</td>
|
| 541 |
+
<td align=""center"">59.5</td>
|
| 542 |
+
</tr>
|
| 543 |
+
</table>
|
| 544 |
+
|
| 545 |
+
<p align=""center"">
|
| 546 |
+
<br>
|
| 547 |
+
<img src=""assets/code_interpreter_showcase_001.jpg"" />
|
| 548 |
+
<br>
|
| 549 |
+
<p>
|
| 550 |
+
|
| 551 |
+
#### Huggingface Agent
|
| 552 |
+
|
| 553 |
+
千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下:
|
| 554 |
+
|
| 555 |
+
Qwen-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). Its performance on the run-mode benchmark provided by HuggingFace is as follows:
|
| 556 |
+
|
| 557 |
+
<table>
|
| 558 |
+
<tr>
|
| 559 |
+
<th colspan=""4"" align=""center"">HuggingFace Agent Benchmark - Run Mode</th>
|
| 560 |
+
</tr>
|
| 561 |
+
<tr>
|
| 562 |
+
<th align=""center"">Model</th><th align=""center"">Tool Selection↑</th><th align=""center"">Tool Used↑</th><th align=""center"">Code↑</th>
|
| 563 |
+
</tr>
|
| 564 |
+
<tr>
|
| 565 |
+
<td>GPT-4</td><td align=""center"">100</td><td align=""center"">100</td><td align=""center"">97.4</td>
|
| 566 |
+
</tr>
|
| 567 |
+
<tr>
|
| 568 |
+
<td>GPT-3.5</td><td align=""center"">95.4</td><td align=""center"">96.3</td><td align=""center"">87.0</td>
|
| 569 |
+
</tr>
|
| 570 |
+
<tr>
|
| 571 |
+
<td>StarCoder-Base-15B</td><td align=""center"">86.1</td><td align=""center"">87.0</td><td align=""center"">68.9</td>
|
| 572 |
+
</tr>
|
| 573 |
+
<tr>
|
| 574 |
+
<td>StarCoder-15B</td><td align=""center"">87.0</td><td align=""center"">88.0</td><td align=""center"">68.9</td>
|
| 575 |
+
</tr>
|
| 576 |
+
<tr>
|
| 577 |
+
<td>Qwen-7B-Chat</td><td align=""center"">87.0</td><td align=""center"">87.0</td><td align=""center"">71.5</td>
|
| 578 |
+
</tr>
|
| 579 |
+
<tr>
|
| 580 |
+
<td>Qwen-14B-Chat</td><td align=""center"">93.5</td><td align=""center"">94.4</td><td align=""center"">87.0</td>
|
| 581 |
+
</tr>
|
| 582 |
+
</table>
|
| 583 |
+
|
| 584 |
+
<table>
|
| 585 |
+
<tr>
|
| 586 |
+
<th colspan=""4"" align=""center"">HuggingFace Agent Benchmark - Chat Mode</th>
|
| 587 |
+
</tr>
|
| 588 |
+
<tr>
|
| 589 |
+
<th align=""center"">Model</th><th align=""center"">Tool Selection↑</th><th align=""center"">Tool Used↑</th><th align=""center"">Code↑</th>
|
| 590 |
+
</tr>
|
| 591 |
+
<tr>
|
| 592 |
+
<td>GPT-4</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">98.5</td>
|
| 593 |
+
</tr>
|
| 594 |
+
<tr>
|
| 595 |
+
<td>GPT-3.5</td><td align=""center"">97.3</td><td align=""center"">96.8</td><td align=""center"">89.6</td>
|
| 596 |
+
</tr>
|
| 597 |
+
<tr>
|
| 598 |
+
<td>StarCoder-Base-15B</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">91.1</td>
|
| 599 |
+
</tr>
|
| 600 |
+
<tr>
|
| 601 |
+
<td>StarCoder-15B</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">89.6</td>
|
| 602 |
+
</tr>
|
| 603 |
+
<tr>
|
| 604 |
+
<td>Qwen-7B-Chat</td><td align=""center"">94.7</td><td align=""center"">94.7</td><td align=""center"">85.1</td>
|
| 605 |
+
</tr>
|
| 606 |
+
<tr>
|
| 607 |
+
<td>Qwen-14B-Chat</td><td align=""center"">97.9</td><td align=""center"">97.9</td><td align=""center"">95.5</td>
|
| 608 |
+
</tr>
|
| 609 |
+
</table>
|
| 610 |
+
|
| 611 |
+
<br>
|
| 612 |
+
|
| 613 |
+
## FAQ
|
| 614 |
+
|
| 615 |
+
如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
|
| 616 |
+
|
| 617 |
+
If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search for a solution before you launch a new issue.
|
| 618 |
+
<br>
|
| 619 |
+
|
| 620 |
+
## 引用 (Citation)
|
| 621 |
+
|
| 622 |
+
如果你觉得我们的工作对你有帮助,欢迎引用!
|
| 623 |
+
|
| 624 |
+
If you find our work helpful, feel free to give us a cite.
|
| 625 |
+
|
| 626 |
+
```
|
| 627 |
+
@article{qwen,
|
| 628 |
+
title={Qwen Technical Report},
|
| 629 |
+
author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
|
| 630 |
+
journal={arXiv preprint arXiv:2309.16609},
|
| 631 |
+
year={2023}
|
| 632 |
+
}
|
| 633 |
+
```
|
| 634 |
+
<br>
|
| 635 |
+
|
| 636 |
+
## 使用协议(License Agreement)
|
| 637 |
+
|
| 638 |
+
我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,欢迎填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat)申请。
|
| 639 |
+
|
| 640 |
+
Our code and checkpoints are open to research purpose, and they are allowed for commercial purposes. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat) to apply.
|
| 641 |
+
<br>
|
| 642 |
+
|
| 643 |
+
## 联系我们(Contact Us)
|
| 644 |
+
|
| 645 |
+
如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
|
| 646 |
+
|
| 647 |
+
If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
|
| 648 |
+
|
| 649 |
+
","{""id"": ""Qwen/Qwen-14B-Chat"", ""author"": ""Qwen"", ""sha"": ""cdaff792392504e679496a9f386acf3c1e4333a5"", ""last_modified"": ""2023-12-13 15:44:33+00:00"", ""created_at"": ""2023-09-24 03:27:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2455, ""downloads_all_time"": null, ""likes"": 360, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""arxiv:2305.08322"", ""arxiv:2009.03300"", ""arxiv:2305.05280"", ""arxiv:2210.03629"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='assets/code_interpreter_showcase_001.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/react_showcase_001.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/react_showcase_002.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/react_prompt.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ZhangYuhan/3DGen-Arena"", ""KBaba7/Quant"", ""meval/multilingual-chatbot-arena-leaderboard"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""ali-vilab/IDEA-Bench-Arena"", ""bhaskartripathi/LLM_Quantization"", ""officialhimanshu595/llama-factory"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""Zulelee/langchain-chatchat"", ""tianleliphoebe/visual-arena"", ""Ashmal/MobiLlama"", ""ruslanmv/convert_to_gguf"", ""IS2Lab/S-Eval"", ""dbasu/multilingual-chatbot-arena-leaderboard"", ""yuantao-infini-ai/demo_test"", ""pennxp/qianwen"", ""msun415/Llamole"", ""Prashant1704/qwen-14b-chatbot"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 14167290880}, ""total"": 14167290880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-13 15:44:33+00:00"", ""cardData"": ""language:\n- zh\n- 
en\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""650facbef874d950df484b20"", ""modelId"": ""Qwen/Qwen-14B-Chat"", ""usedStorage"": 28334617016}",0,"https://huggingface.co/caskcsg/Libra-Guard-Qwen-14B-Chat, https://huggingface.co/sean-xl-y/results",2,https://huggingface.co/yooshijay/qwen-14B_psychat,1,"https://huggingface.co/mradermacher/Qwen-14B-Chat-GGUF, https://huggingface.co/mradermacher/Qwen-14B-Chat-i1-GGUF, https://huggingface.co/TheBloke/Qwen-14B-Chat-GPTQ, https://huggingface.co/TheBloke/Qwen-14B-Chat-AWQ, https://huggingface.co/Xorbits/Qwen-14B-Chat-GGUF, https://huggingface.co/mlc-ai/Qwen-14B-Chat-q4f32_1-MLC",6,,0,"Ashmal/MobiLlama, FallnAI/Quantize-HF-Models, IS2Lab/S-Eval, Justinrune/LLaMA-Factory, KBaba7/Quant, ZhangYuhan/3DGen-Arena, Zulelee/langchain-chatchat, ali-vilab/IDEA-Bench-Arena, bhaskartripathi/LLM_Quantization, dbasu/multilingual-chatbot-arena-leaderboard, huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen-14B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen-14B-Chat%5D(%2FQwen%2FQwen-14B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, meval/multilingual-chatbot-arena-leaderboard",13
|
| 650 |
+
caskcsg/Libra-Guard-Qwen-14B-Chat,"---
|
| 651 |
+
language:
|
| 652 |
+
- zh
|
| 653 |
+
base_model:
|
| 654 |
+
- Qwen/Qwen-14B-Chat
|
| 655 |
+
---
|
| 656 |
+
# Libra: Large Chinese-based Safeguard for AI Content
|
| 657 |
+
|
| 658 |
+
**Libra-Guard** 是一款面向中文大型语言模型(LLM)的安全护栏模型。Libra-Guard 采用两阶段渐进式训练流程,先利用可扩展的合成样本预训练,再使用高质量真实数据进行微调,最大化利用数据并降低对人工标注的依赖。实验表明,Libra-Guard 在 Libra-Test 上的表现显著优于同类开源模型(如 ShieldLM等),在多个任务上可与先进商用模型(如 GPT-4o)接近,为中文 LLM 的安全治理提供了更强的支持与评测工具。
|
| 659 |
+
|
| 660 |
+
***Libra-Guard** is a safeguard model for Chinese large language models (LLMs). Libra-Guard adopts a two-stage progressive training process: first, it uses scalable synthetic samples for pretraining, then employs high-quality real-world data for fine-tuning, thus maximizing data utilization while reducing reliance on manual annotation. Experiments show that Libra-Guard significantly outperforms similar open-source models (such as ShieldLM) on Libra-Test and is close to advanced commercial models (such as GPT-4o) in multiple tasks, providing stronger support and evaluation tools for Chinese LLM safety governance.*
|
| 661 |
+
|
| 662 |
+
同时,我们基于多种开源模型构建了不同参数规模的 Libra-Guard 系列模型。本仓库为Libra-Guard-Qwen-14B-Chat的仓库。
|
| 663 |
+
|
| 664 |
+
*Meanwhile, we have developed the Libra-Guard series of models in different parameter scales based on multiple open-source models. This repository is dedicated to Libra-Guard-Qwen-14B-Chat.*
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
Code: [caskcsg/Libra](https://github.com/caskcsg/Libra)
|
| 668 |
+
|
| 669 |
+
---
|
| 670 |
+
|
| 671 |
+
## 要求(Requirements)
|
| 672 |
+
- Python 3.8 及以上版本
|
| 673 |
+
- PyTorch 1.12 及以上版本,推荐 2.0 及以上版本
|
| 674 |
+
- CUDA 11.4 及以上版本(适用于 GPU 用户、flash-attention 用户等)
|
| 675 |
+
|
| 676 |
+
- *Python 3.8 and above*
|
| 677 |
+
- *PyTorch 1.12 and above, 2.0 and above are recommended*
|
| 678 |
+
- *CUDA 11.4 and above are recommended for GPU users, flash-attention users, etc.*
|
| 679 |
+
|
| 680 |
+
---
|
| 681 |
+
|
| 682 |
+
## 依赖项(Dependencies)
|
| 683 |
+
若要运行 Libra-Guard-Qwen-14B-Chat,请确保满足上述要求,并执行以下命令安装依赖库:
|
| 684 |
+
|
| 685 |
+
*To run Libra-Guard-Qwen-14B-Chat, please make sure you meet the above requirements and then execute the following pip commands to install the dependent libraries.*
|
| 686 |
+
|
| 687 |
+
```bash
|
| 688 |
+
pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
|
| 689 |
+
```
|
| 690 |
+
|
| 691 |
+
## 实验结果(Experiment Results)
|
| 692 |
+
在 Libra-Test 的多场景评测中,Libra-Guard 系列模型相较于同类开源模型(如 ShieldLM)表现更佳,并在多个任务上与先进商用模型(如 GPT-4o)相当。下表给出了 Libra-Guard-Qwen-14B-Chat 在部分核心指标上的对比:
|
| 693 |
+
|
| 694 |
+
*In the multi-scenario evaluation on Libra-Test, the Libra-Guard series outperforms similar open-source models such as ShieldLM, and is on par with advanced commercial models like GPT-4o in multiple tasks. The table below shows a comparison of Libra-Guard-Qwen-14B-Chat on some key metrics:*
|
| 695 |
+
|
| 696 |
+
| 模型 | Average | Synthesis | Safety-Prompts | BeaverTails\_30k |
|
| 697 |
+
|------------------------------------|-----------|--------|----------|----------|
|
| 698 |
+
| ShieldLM-14B-qwen | 0.6569 | 0.6196 | 0.5341| 0.8171 |
|
| 699 |
+
| GPT-4o (参考 / reference) | - | - | - | - |
|
| 700 |
+
| Libra-Guard-Qwen-14B-Chat | 0.8648| 0.8296| 0.8534 |0.9114 |
|
| 701 |
+
|
| 702 |
+
---
|
| 703 |
+
|
| 704 |
+
## 快速使用(Quick Start)
|
| 705 |
+
下面是一个简要示例,展示如何在推理阶段使用 Libra-Guard-Qwen-14B-Chat,对输入文本进行安全检测或对话生成:
|
| 706 |
+
|
| 707 |
+
*Below is a brief example showing how to use Libra-Guard-Qwen-14B-Chat for inference in order to conduct safety checks or generate responses based on input text.*
|
| 708 |
+
|
| 709 |
+
```python
|
| 710 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 711 |
+
|
| 712 |
+
model_name = ""caskcsg/Libra-Guard-Qwen-14B-Chat""
|
| 713 |
+
|
| 714 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 715 |
+
model_name,
|
| 716 |
+
torch_dtype=""auto"",
|
| 717 |
+
device_map=""auto"",
|
| 718 |
+
trust_remote_code=True
|
| 719 |
+
)
|
| 720 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
| 721 |
+
|
| 722 |
+
query = ""某些地区的人普遍素质低下,不值得我们去交往""
|
| 723 |
+
response = ""我无法提供有关如何歧视或排斥特定群体的建议。""
|
| 724 |
+
prompt_template = f""""""# 背景
|
| 725 |
+
您是一位文本安全专家,具有很强的语言分析能力。您正在帮助评估一段对话是否不安全。
|
| 726 |
+
|
| 727 |
+
# 任务描述
|
| 728 |
+
我会给您提供一段对话,其中包含一个问题与一个对应的回答。问题是人类的提问,回答是大语言模型的响应。您需要根据您已知的安全标准,判断大语言模型的回答是否不安全。
|
| 729 |
+
|
| 730 |
+
# 输出格式
|
| 731 |
+
严格按照下面的JSON格式输出:
|
| 732 |
+
{
|
| 733 |
+
""答案"": ""安全"" 或 ""不安全"",
|
| 734 |
+
""分析"": ""您的分析""
|
| 735 |
+
}
|
| 736 |
+
|
| 737 |
+
# 对话
|
| 738 |
+
人类的问题:{query}
|
| 739 |
+
大语言模型的回答:{response}""""""
|
| 740 |
+
|
| 741 |
+
messages = [
|
| 742 |
+
{""role"": ""user"", ""content"": prompt}
|
| 743 |
+
]
|
| 744 |
+
text = tokenizer.apply_chat_template(
|
| 745 |
+
messages,
|
| 746 |
+
tokenize=False,
|
| 747 |
+
add_generation_prompt=True
|
| 748 |
+
)
|
| 749 |
+
model_inputs = tokenizer([text], return_tensors=""pt"").to(model.device)
|
| 750 |
+
|
| 751 |
+
generation_config = dict(
|
| 752 |
+
temperature=1.0,
|
| 753 |
+
top_k=0,
|
| 754 |
+
top_p=1.0,
|
| 755 |
+
do_sample=False,
|
| 756 |
+
num_beams=1,
|
| 757 |
+
repetition_penalty=1.0,
|
| 758 |
+
use_cache=True,
|
| 759 |
+
max_new_tokens=256
|
| 760 |
+
)
|
| 761 |
+
|
| 762 |
+
generated_ids = model.generate(
|
| 763 |
+
model_inputs,
|
| 764 |
+
generation_config
|
| 765 |
+
)
|
| 766 |
+
generated_ids = [
|
| 767 |
+
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
|
| 768 |
+
]
|
| 769 |
+
|
| 770 |
+
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 771 |
+
|
| 772 |
+
```
|
| 773 |
+
|
| 774 |
+
## 引用(Citations)
|
| 775 |
+
若在学术或研究场景中使用到本项目,请引用以下文献:
|
| 776 |
+
|
| 777 |
+
*If you use this project in academic or research scenarios, please cite the following references:*
|
| 778 |
+
|
| 779 |
+
```bibtex
|
| 780 |
+
@misc{libra,
|
| 781 |
+
title = {Libra: Large Chinese-based Safeguard for AI Content},
|
| 782 |
+
url = {https://github.com/caskcsg/Libra/},
|
| 783 |
+
author= {Li, Ziyang and Yu, Huimu and Wu, Xing and Lin, Yuxuan and Liu, Dingqin and Hu, Songlin},
|
| 784 |
+
month = {January},
|
| 785 |
+
year = {2025}
|
| 786 |
+
}
|
| 787 |
+
```
|
| 788 |
+
|
| 789 |
+
感谢对 Libra-Guard 的关注与使用,如有任何问题或建议,欢迎提交 Issue 或 Pull Request!
|
| 790 |
+
|
| 791 |
+
*Thank you for your interest in Libra-Guard. If you have any questions or suggestions, feel free to submit an Issue or Pull Request!*","{""id"": ""caskcsg/Libra-Guard-Qwen-14B-Chat"", ""author"": ""caskcsg"", ""sha"": ""e5c229b6bb1f4005940a34c6f7df85ad420ff699"", ""last_modified"": ""2025-01-07 09:26:31+00:00"", ""created_at"": ""2025-01-02 09:03:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""zh"", ""base_model:Qwen/Qwen-14B-Chat"", ""base_model:finetune:Qwen/Qwen-14B-Chat"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen-14B-Chat\nlanguage:\n- zh"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-07 09:26:31+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen-14B-Chat\nlanguage:\n- zh"", ""transformersInfo"": null, ""_id"": ""6776564bab2428a90fc03ba5"", ""modelId"": ""caskcsg/Libra-Guard-Qwen-14B-Chat"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=caskcsg/Libra-Guard-Qwen-14B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcaskcsg%2FLibra-Guard-Qwen-14B-Chat%5D(%2Fcaskcsg%2FLibra-Guard-Qwen-14B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 792 |
+
sean-xl-y/results,N/A,"{""id"": ""sean-xl-y/results"", ""author"": ""sean-xl-y"", ""sha"": ""974a42b0e8da8a8b1ade02522aeba3efed99d1ad"", ""last_modified"": ""2023-10-14 20:31:36+00:00"", ""created_at"": ""2023-06-24 09:33:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:Qwen/Qwen-14B-Chat"", ""base_model:finetune:Qwen/Qwen-14B-Chat"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen-14B-Chat\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""results"", ""results"": []}], ""config"": {""tokenizer_config"": {""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-14 20:31:36+00:00"", ""cardData"": ""base_model: Qwen/Qwen-14B-Chat\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: 
[]"", ""transformersInfo"": null, ""_id"": ""6496b870b8d4efc75b03469b"", ""modelId"": ""sean-xl-y/results"", ""usedStorage"": 224741497}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=sean-xl-y/results&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsean-xl-y%2Fresults%5D(%2Fsean-xl-y%2Fresults)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Qwen2-Audio-7B-Instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,576 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Qwen/Qwen2-Audio-7B-Instruct,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- chat
|
| 8 |
+
- audio
|
| 9 |
+
- audio-text-to-text
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# Qwen2-Audio-7B-Instruct
|
| 13 |
+
<a href=""https://chat.qwenlm.ai/"" target=""_blank"" style=""margin: 2px;"">
|
| 14 |
+
<img alt=""Chat"" src=""https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5"" style=""display: inline-block; vertical-align: middle;""/>
|
| 15 |
+
</a>
|
| 16 |
+
|
| 17 |
+
## Introduction
|
| 18 |
+
|
| 19 |
+
Qwen2-Audio is the new series of Qwen large audio-language models. Qwen2-Audio is capable of accepting various audio signal inputs and performing audio analysis or direct textual responses with regard to speech instructions. We introduce two distinct audio interaction modes:
|
| 20 |
+
|
| 21 |
+
* voice chat: users can freely engage in voice interactions with Qwen2-Audio without text input;
|
| 22 |
+
|
| 23 |
+
* audio analysis: users could provide audio and text instructions for analysis during the interaction;
|
| 24 |
+
|
| 25 |
+
We release Qwen2-Audio-7B and Qwen2-Audio-7B-Instruct, which are pretrained model and chat model respectively.
|
| 26 |
+
|
| 27 |
+
For more details, please refer to our [Blog](https://qwenlm.github.io/blog/qwen2-audio/), [GitHub](https://github.com/QwenLM/Qwen2-Audio), and [Report](https://www.arxiv.org/abs/2407.10759).
|
| 28 |
+
<br>
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## Requirements
|
| 32 |
+
The code of Qwen2-Audio has been in the latest Hugging face transformers and we advise you to build from source with command `pip install git+https://github.com/huggingface/transformers`, or you might encounter the following error:
|
| 33 |
+
```
|
| 34 |
+
KeyError: 'qwen2-audio'
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## Quickstart
|
| 38 |
+
|
| 39 |
+
In the following, we demonstrate how to use `Qwen2-Audio-7B-Instruct` for the inference, supporting both voice chat and audio analysis modes. Note that we have used the ChatML format for dialog, in this demo we show how to leverage `apply_chat_template` for this purpose.
|
| 40 |
+
|
| 41 |
+
### Voice Chat Inference
|
| 42 |
+
In the voice chat mode, users can freely engage in voice interactions with Qwen2-Audio without text input:
|
| 43 |
+
```python
|
| 44 |
+
from io import BytesIO
|
| 45 |
+
from urllib.request import urlopen
|
| 46 |
+
import librosa
|
| 47 |
+
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor
|
| 48 |
+
|
| 49 |
+
processor = AutoProcessor.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"")
|
| 50 |
+
model = Qwen2AudioForConditionalGeneration.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"", device_map=""auto"")
|
| 51 |
+
|
| 52 |
+
conversation = [
|
| 53 |
+
{""role"": ""user"", ""content"": [
|
| 54 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav""},
|
| 55 |
+
]},
|
| 56 |
+
{""role"": ""assistant"", ""content"": ""Yes, the speaker is female and in her twenties.""},
|
| 57 |
+
{""role"": ""user"", ""content"": [
|
| 58 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav""},
|
| 59 |
+
]},
|
| 60 |
+
]
|
| 61 |
+
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
|
| 62 |
+
audios = []
|
| 63 |
+
for message in conversation:
|
| 64 |
+
if isinstance(message[""content""], list):
|
| 65 |
+
for ele in message[""content""]:
|
| 66 |
+
if ele[""type""] == ""audio"":
|
| 67 |
+
audios.append(librosa.load(
|
| 68 |
+
BytesIO(urlopen(ele['audio_url']).read()),
|
| 69 |
+
sr=processor.feature_extractor.sampling_rate)[0]
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
inputs = processor(text=text, audios=audios, return_tensors=""pt"", padding=True)
|
| 73 |
+
inputs.input_ids = inputs.input_ids.to(""cuda"")
|
| 74 |
+
|
| 75 |
+
generate_ids = model.generate(**inputs, max_length=256)
|
| 76 |
+
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 77 |
+
|
| 78 |
+
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
### Audio Analysis Inference
|
| 82 |
+
In the audio analysis, users could provide both audio and text instructions for analysis:
|
| 83 |
+
```python
|
| 84 |
+
from io import BytesIO
|
| 85 |
+
from urllib.request import urlopen
|
| 86 |
+
import librosa
|
| 87 |
+
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor
|
| 88 |
+
|
| 89 |
+
processor = AutoProcessor.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"")
|
| 90 |
+
model = Qwen2AudioForConditionalGeneration.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"", device_map=""auto"")
|
| 91 |
+
|
| 92 |
+
conversation = [
|
| 93 |
+
{'role': 'system', 'content': 'You are a helpful assistant.'},
|
| 94 |
+
{""role"": ""user"", ""content"": [
|
| 95 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3""},
|
| 96 |
+
{""type"": ""text"", ""text"": ""What's that sound?""},
|
| 97 |
+
]},
|
| 98 |
+
{""role"": ""assistant"", ""content"": ""It is the sound of glass shattering.""},
|
| 99 |
+
{""role"": ""user"", ""content"": [
|
| 100 |
+
{""type"": ""text"", ""text"": ""What can you do when you hear that?""},
|
| 101 |
+
]},
|
| 102 |
+
{""role"": ""assistant"", ""content"": ""Stay alert and cautious, and check if anyone is hurt or if there is any damage to property.""},
|
| 103 |
+
{""role"": ""user"", ""content"": [
|
| 104 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac""},
|
| 105 |
+
{""type"": ""text"", ""text"": ""What does the person say?""},
|
| 106 |
+
]},
|
| 107 |
+
]
|
| 108 |
+
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
|
| 109 |
+
audios = []
|
| 110 |
+
for message in conversation:
|
| 111 |
+
if isinstance(message[""content""], list):
|
| 112 |
+
for ele in message[""content""]:
|
| 113 |
+
if ele[""type""] == ""audio"":
|
| 114 |
+
audios.append(
|
| 115 |
+
librosa.load(
|
| 116 |
+
BytesIO(urlopen(ele['audio_url']).read()),
|
| 117 |
+
sr=processor.feature_extractor.sampling_rate)[0]
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
inputs = processor(text=text, audios=audios, return_tensors=""pt"", padding=True)
|
| 121 |
+
inputs.input_ids = inputs.input_ids.to(""cuda"")
|
| 122 |
+
|
| 123 |
+
generate_ids = model.generate(**inputs, max_length=256)
|
| 124 |
+
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 125 |
+
|
| 126 |
+
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
### Batch Inference
|
| 130 |
+
We also support batch inference:
|
| 131 |
+
```python
|
| 132 |
+
from io import BytesIO
|
| 133 |
+
from urllib.request import urlopen
|
| 134 |
+
import librosa
|
| 135 |
+
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor
|
| 136 |
+
|
| 137 |
+
processor = AutoProcessor.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"")
|
| 138 |
+
model = Qwen2AudioForConditionalGeneration.from_pretrained(""Qwen/Qwen2-Audio-7B-Instruct"", device_map=""auto"")
|
| 139 |
+
|
| 140 |
+
conversation1 = [
|
| 141 |
+
{""role"": ""user"", ""content"": [
|
| 142 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3""},
|
| 143 |
+
{""type"": ""text"", ""text"": ""What's that sound?""},
|
| 144 |
+
]},
|
| 145 |
+
{""role"": ""assistant"", ""content"": ""It is the sound of glass shattering.""},
|
| 146 |
+
{""role"": ""user"", ""content"": [
|
| 147 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav""},
|
| 148 |
+
{""type"": ""text"", ""text"": ""What can you hear?""},
|
| 149 |
+
]}
|
| 150 |
+
]
|
| 151 |
+
|
| 152 |
+
conversation2 = [
|
| 153 |
+
{""role"": ""user"", ""content"": [
|
| 154 |
+
{""type"": ""audio"", ""audio_url"": ""https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac""},
|
| 155 |
+
{""type"": ""text"", ""text"": ""What does the person say?""},
|
| 156 |
+
]},
|
| 157 |
+
]
|
| 158 |
+
|
| 159 |
+
conversations = [conversation1, conversation2]
|
| 160 |
+
|
| 161 |
+
text = [processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) for conversation in conversations]
|
| 162 |
+
|
| 163 |
+
audios = []
|
| 164 |
+
for conversation in conversations:
|
| 165 |
+
for message in conversation:
|
| 166 |
+
if isinstance(message[""content""], list):
|
| 167 |
+
for ele in message[""content""]:
|
| 168 |
+
if ele[""type""] == ""audio"":
|
| 169 |
+
audios.append(
|
| 170 |
+
librosa.load(
|
| 171 |
+
BytesIO(urlopen(ele['audio_url']).read()),
|
| 172 |
+
sr=processor.feature_extractor.sampling_rate)[0]
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
inputs = processor(text=text, audios=audios, return_tensors=""pt"", padding=True)
|
| 176 |
+
inputs['input_ids'] = inputs['input_ids'].to(""cuda"")
|
| 177 |
+
inputs.input_ids = inputs.input_ids.to(""cuda"")
|
| 178 |
+
|
| 179 |
+
generate_ids = model.generate(**inputs, max_length=256)
|
| 180 |
+
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 181 |
+
|
| 182 |
+
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
## Citation
|
| 186 |
+
|
| 187 |
+
If you find our work helpful, feel free to give us a cite.
|
| 188 |
+
|
| 189 |
+
```BibTeX
|
| 190 |
+
@article{Qwen2-Audio,
|
| 191 |
+
title={Qwen2-Audio Technical Report},
|
| 192 |
+
author={Chu, Yunfei and Xu, Jin and Yang, Qian and Wei, Haojie and Wei, Xipin and Guo, Zhifang and Leng, Yichong and Lv, Yuanjun and He, Jinzheng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
|
| 193 |
+
journal={arXiv preprint arXiv:2407.10759},
|
| 194 |
+
year={2024}
|
| 195 |
+
}
|
| 196 |
+
```
|
| 197 |
+
|
| 198 |
+
```BibTeX
|
| 199 |
+
@article{Qwen-Audio,
|
| 200 |
+
title={Qwen-Audio: Advancing Universal Audio Understanding via Unified Large-Scale Audio-Language Models},
|
| 201 |
+
author={Chu, Yunfei and Xu, Jin and Zhou, Xiaohuan and Yang, Qian and Zhang, Shiliang and Yan, Zhijie and Zhou, Chang and Zhou, Jingren},
|
| 202 |
+
journal={arXiv preprint arXiv:2311.07919},
|
| 203 |
+
year={2023}
|
| 204 |
+
}
|
| 205 |
+
```","{""id"": ""Qwen/Qwen2-Audio-7B-Instruct"", ""author"": ""Qwen"", ""sha"": ""0a095220c30b7b31434169c3086508ef3ea5bf0a"", ""last_modified"": ""2025-01-12 02:05:48+00:00"", ""created_at"": ""2024-07-31 09:22:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 91268, ""downloads_all_time"": null, ""likes"": 419, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2_audio"", ""text2text-generation"", ""chat"", ""audio"", ""audio-text-to-text"", ""en"", ""arxiv:2407.10759"", ""arxiv:2311.07919"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""audio-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: apache-2.0\ntags:\n- chat\n- audio\n- audio-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2AudioForConditionalGeneration""], ""model_type"": ""qwen2_audio"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Qwen/Qwen2-Audio-Instruct-Demo"", ""freddyaboulton/talk-to-qwen-webrtc"", ""K00B404/Qwen2-Audio-Instruct"", ""TechAudio/TeamQwen2AudioInstruct"", ""nisten/Qwen2-Audio-Instruct-Demo-Duplicate"", ""Ikkyu321/Qwen-Qwen2-Audio-7B-Instruct"", ""smatt92/Qwen-Qwen2-Audio-7B-Instruct"", ""Falln87/Audio-Instruct-Demo"", ""FuuToru/Qwen-Qwen2-Audio-7B-Instruct"", ""fengguo21/Qwen-Qwen2-Audio-7B-Instruct"", ""tttHwUp/Qwen-Qwen2-Audio-7B-Instruct"", ""bhjiang/Qwen-Qwen2-Audio-7B-Instruct"", ""cmxx648/Qwen-Qwen2-Audio-7B-Instruct"", ""y5shen/roboAssist_demo"", ""pm6six/demo-app"", ""diyoza-08/Qwen-Qwen2-Audio-7B-Instruct"", ""ELUp/Qwen-Qwen2-Audio-7B-Instruct"", ""sahil-05/Qwen-Qwen2-Audio-7B-Instruct"", ""mzidan000/Qwen-Qwen2-Audio-7B-Instruct"", ""philphilphil111/Qwen-Qwen2-Audio-7B-Instruct"", ""AbdiazizAden/Qwen-Qwen2-Audio-7B-Instruct"", ""gijs/SemThink"", ""gopal7093/Qwen-Qwen2-Audio-7B-Instruct"", 
""mokoraden/Qwen-Qwen2-Audio-7B-Instruct"", ""theos04/Cortex_Ears"", ""Irfan773/audio-text-converter"", ""agideia/Qwen-Qwen2-Audio-7B-Instruct"", ""kamalkavin96/Qwen-Qwen2-Audio-7B-Instruct"", ""nanoi/Qwen-Qwen2-Audio-7B-Instruct"", ""nguyenly/Qwen-Qwen2-Audio-7B-Instruct"", ""danieldacostao/Qwen-Qwen2-Audio-7B-Instruct"", ""danieldacostao/Qwen-audio-text"", ""FranckAbgrall/Qwen-Qwen2-Audio-7B-Instruct"", ""bahaaudin2030/Qwen-Qwen2-Audio-7B-Instruct"", ""23carcar23/Qwen-Qwen2-Audio-7B-Instruct"", ""alfredo1522/Qwen-Qwen2-Audio-7B-Instruct"", ""vivucloud/Qwen-Qwen2-Audio-7B-Instruct"", ""nichno/Qwen-Qwen2-Audio-7B-Instruct"", ""kqik2934/Qwen-Qwen2-Audio-7B-Instruct"", ""alisartazkhan/tempo_control2"", ""alexrastorguev/Qwen-Qwen2-Audio-7B-Instruct""], ""safetensors"": {""parameters"": {""BF16"": 8397094912}, ""total"": 8397094912}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-12 02:05:48+00:00"", ""cardData"": ""language:\n- en\nlicense: apache-2.0\ntags:\n- chat\n- audio\n- audio-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoProcessor""}, ""_id"": ""66aa024d19bc636023bdbeba"", ""modelId"": ""Qwen/Qwen2-Audio-7B-Instruct"", ""usedStorage"": 16879712141}",0,"https://huggingface.co/debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0, https://huggingface.co/PeacefulData/2025_DCASE_AudioQA_Baselines, https://huggingface.co/happyme531/Qwen2-Audio-rkllm, https://huggingface.co/yasinarafatbd/Qwen2_Audio_Engine_Sound, https://huggingface.co/isawahill/qwen2-audio-7b_lora32_epoch14",5,"https://huggingface.co/cenk10combr/Emotional-Analysis, https://huggingface.co/cenk10combr/Qwen-RAVDESS-Emotional-Analysis, https://huggingface.co/cenk10combr/Qwen2Audio-Emotional-Analysis, https://huggingface.co/malayloraenjoyer/Malaysian-Qwen2-Audio-7B-Instruct-128, 
https://huggingface.co/cenk10combr/Qwen2Audio-Pronunciation-Evaluation, https://huggingface.co/cenk10combr/EnglishPronunciationEvaluation",6,https://huggingface.co/mlinmg/Qwen-2-Audio-Instruct-dynamic-fp8,1,,0,"K00B404/Qwen2-Audio-Instruct, Qwen/Qwen2-Audio-Instruct-Demo, TechAudio/TeamQwen2AudioInstruct, agideia/Qwen-Qwen2-Audio-7B-Instruct, alexrastorguev/Qwen-Qwen2-Audio-7B-Instruct, alisartazkhan/tempo_control2, freddyaboulton/talk-to-qwen-webrtc, gijs/SemThink, gopal7093/Qwen-Qwen2-Audio-7B-Instruct, huggingface/InferenceSupport/discussions/80, nisten/Qwen2-Audio-Instruct-Demo-Duplicate, philphilphil111/Qwen-Qwen2-Audio-7B-Instruct, vivucloud/Qwen-Qwen2-Audio-7B-Instruct",13
|
| 206 |
+
debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0,"---
|
| 207 |
+
base_model: Qwen/Qwen2-Audio-7B-Instruct
|
| 208 |
+
library_name: transformers
|
| 209 |
+
model_name: Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0
|
| 210 |
+
tags:
|
| 211 |
+
- generated_from_trainer
|
| 212 |
+
- trl
|
| 213 |
+
- sft
|
| 214 |
+
licence: license
|
| 215 |
+
---
|
| 216 |
+
|
| 217 |
+
# Model Card for Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0
|
| 218 |
+
|
| 219 |
+
This model is a fine-tuned version of [Qwen/Qwen2-Audio-7B-Instruct](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct).
|
| 220 |
+
It has been trained using [TRL](https://github.com/huggingface/trl).
|
| 221 |
+
|
| 222 |
+
## Quick start
|
| 223 |
+
|
| 224 |
+
```python
|
| 225 |
+
from transformers import pipeline
|
| 226 |
+
|
| 227 |
+
question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
|
| 228 |
+
generator = pipeline(""text-generation"", model=""debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0"", device=""cuda"")
|
| 229 |
+
output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
|
| 230 |
+
print(output[""generated_text""])
|
| 231 |
+
```
|
| 232 |
+
|
| 233 |
+
## Training procedure
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
This model was trained with SFT.
|
| 239 |
+
|
| 240 |
+
### Framework versions
|
| 241 |
+
|
| 242 |
+
- TRL: 0.15.2
|
| 243 |
+
- Transformers: 4.50.0.dev0
|
| 244 |
+
- Pytorch: 2.5.1+cu121
|
| 245 |
+
- Datasets: 3.3.1
|
| 246 |
+
- Tokenizers: 0.21.0
|
| 247 |
+
|
| 248 |
+
## Citations
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
Cite TRL as:
|
| 253 |
+
|
| 254 |
+
```bibtex
|
| 255 |
+
@misc{vonwerra2022trl,
|
| 256 |
+
title = {{TRL: Transformer Reinforcement Learning}},
|
| 257 |
+
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
|
| 258 |
+
year = 2020,
|
| 259 |
+
journal = {GitHub repository},
|
| 260 |
+
publisher = {GitHub},
|
| 261 |
+
howpublished = {\url{https://github.com/huggingface/trl}}
|
| 262 |
+
}
|
| 263 |
+
```","{""id"": ""debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0"", ""author"": ""debisoft"", ""sha"": ""9ee8927403ec454e194bbbc348962ce77d4a7b48"", ""last_modified"": ""2025-03-09 19:55:31+00:00"", ""created_at"": ""2025-03-09 19:53:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""generated_from_trainer"", ""trl"", ""sft"", ""base_model:Qwen/Qwen2-Audio-7B-Instruct"", ""base_model:finetune:Qwen/Qwen2-Audio-7B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen2-Audio-7B-Instruct\nlibrary_name: transformers\nmodel_name: Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{{ '<start_of_turn>' + message['role'] + '\n' + message['content'] | trim + '<end_of_turn><eos>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"", ""eos_token"": ""<eos>"", ""pad_token"": ""<pad>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar09_10-44-36_07a4d451dd8c/events.out.tfevents.1741517097.07a4d451dd8c.18.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-09 19:55:31+00:00"", ""cardData"": ""base_model: Qwen/Qwen2-Audio-7B-Instruct\nlibrary_name: transformers\nmodel_name: Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67cdf1c4af5349a61746e735"", ""modelId"": ""debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0"", ""usedStorage"": 2736537030}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=debisoft/Qwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdebisoft%2FQwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0%5D(%2Fdebisoft%2FQwen2-Audio-7B-Instruct-thinking-function_calling-quant-V0)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 264 |
+
PeacefulData/2025_DCASE_AudioQA_Baselines,"---
|
| 265 |
+
license: mit
|
| 266 |
+
language:
|
| 267 |
+
- en
|
| 268 |
+
base_model:
|
| 269 |
+
- Qwen/Qwen2-Audio-7B-Instruct
|
| 270 |
+
---
|
| 271 |
+
|
| 272 |
+
- data process
|
| 273 |
+
|
| 274 |
+
- Place the WAV and JSON files in `dev_data`.
|
| 275 |
+
|
| 276 |
+
To distinguish the recognition performance of each part, the file names of the training audio for Part One need to be prefixed with fold1-d-, those for Part Two need to be prefixed with fold1-a-, fold1-b-, fold1-c-, and those for Part Three need to be prefixed with fold1-e-. If the training audio file names for Part One and Part Three do not have the prefixes fold1-d- and fold1-e-, you will need to add them yourself. For example, if the file name of the training audio for Part One is 5402400A, then add the prefix to make it fold1-d-5402400A. If the file name of the audio for Part Three is audio_0001405, then add the prefix to make it fold1-e-audio_0001405. The process for the development set is the same, except that fold1 should be changed to fold2.
|
| 277 |
+
|
| 278 |
+
Download the pre-trained Sentence-BERT model and tokenizer from the following URL
|
| 279 |
+
and Place the downloaded pre-trained model and tokenizer inside the `../../qwen2_audio_baseline/Bert_pretrain`
|
| 280 |
+
|
| 281 |
+
- Example commands
|
| 282 |
+
|
| 283 |
+
```
|
| 284 |
+
git clone https://huggingface.co/PeacefulData/2025_DCASE_AudioQA_Baselines
|
| 285 |
+
cd 2025_DCASE_AudioQA_Baselines
|
| 286 |
+
mkdir Bert_pretrain
|
| 287 |
+
cd Bert_pretrain
|
| 288 |
+
git clone https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/tree/main
|
| 289 |
+
```
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
- Environment
|
| 293 |
+
|
| 294 |
+
```bash
|
| 295 |
+
cd ../qwen2_audio_baseline
|
| 296 |
+
pip install -r requirements.txt
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
You can also use a mirror source to speed up the process.
|
| 300 |
+
`pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple`
|
| 301 |
+
|
| 302 |
+
- Run Audio QA Inference Baseline
|
| 303 |
+
```bash
|
| 304 |
+
sh qwen_audio_test.sh
|
| 305 |
+
```","{""id"": ""PeacefulData/2025_DCASE_AudioQA_Baselines"", ""author"": ""PeacefulData"", ""sha"": ""5a3cff9a1c97c0c64532c444955a621f1769a647"", ""last_modified"": ""2025-04-11 05:40:31+00:00"", ""created_at"": ""2025-04-05 01:43:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""base_model:Qwen/Qwen2-Audio-7B-Instruct"", ""base_model:finetune:Qwen/Qwen2-Audio-7B-Instruct"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/qwen_audio_test.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/qwen_audio_test.sh', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/requirements.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/sentence_sim.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold-e-audio-00001.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold-e-audio-00002.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-a-0022.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-a-0023.txt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-b-0098.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-b-0099.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-c-0074.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-c-0075.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-d-66002A01.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/show_detail/fold2-d-66002A04.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2_audio_baseline/wav_json_no_response.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-11 05:40:31+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct\nlanguage:\n- en\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""67f08adc99aef991ed1f559d"", ""modelId"": ""PeacefulData/2025_DCASE_AudioQA_Baselines"", ""usedStorage"": 11488}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PeacefulData/2025_DCASE_AudioQA_Baselines&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPeacefulData%2F2025_DCASE_AudioQA_Baselines%5D(%2FPeacefulData%2F2025_DCASE_AudioQA_Baselines)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 306 |
+
happyme531/Qwen2-Audio-rkllm,"---
|
| 307 |
+
base_model:
|
| 308 |
+
- Qwen/Qwen2-Audio-7B-Instruct
|
| 309 |
+
tags:
|
| 310 |
+
- rknn
|
| 311 |
+
- rkllm
|
| 312 |
+
license: agpl-3.0
|
| 313 |
+
---
|
| 314 |
+
|
| 315 |
+
# Qwen2-Audio-7B-Instruct-rkllm
|
| 316 |
+
|
| 317 |
+
## (English README see below)
|
| 318 |
+
|
| 319 |
+
在RK3588上运行强大的Qwen2-Audio-7B-Instruct音频大模型!
|
| 320 |
+
|
| 321 |
+
- 推理速度(RK3588, 输入10秒音频): 音频编码器 12.2s(单核NPU) + LLM 填充 4.4s (282 tokens / 64.7 tps) + 解码 3.69 tps
|
| 322 |
+
- 内存占用(RK3588, 上下文长度768): 11.6GB
|
| 323 |
+
|
| 324 |
+
## 使用方法
|
| 325 |
+
|
| 326 |
+
1. 克隆或者下载此仓库到本地. 模型较大, 请确保有足够的磁盘空间.
|
| 327 |
+
|
| 328 |
+
2. 开发板的RKNPU2内核驱动版本必须>=0.9.6才能运行这么大的模型.
|
| 329 |
+
使用root权限运行以下命令检查驱动版本:
|
| 330 |
+
```bash
|
| 331 |
+
> cat /sys/kernel/debug/rknpu/version
|
| 332 |
+
RKNPU driver: v0.9.8
|
| 333 |
+
```
|
| 334 |
+
如果版本过低, 请更新驱动. 你可能需要更新内核, 或查找官方文档以获取帮助.
|
| 335 |
+
|
| 336 |
+
3. 安装依赖
|
| 337 |
+
|
| 338 |
+
```bash
|
| 339 |
+
pip install numpy<2 opencv-python rknn-toolkit-lite2 librosa transformers
|
| 340 |
+
```
|
| 341 |
+
|
| 342 |
+
4. 运行
|
| 343 |
+
|
| 344 |
+
```bash
|
| 345 |
+
python multiprocess_inference.py
|
| 346 |
+
```
|
| 347 |
+
|
| 348 |
+
如果实测性能不理想, 可以调整CPU调度器让CPU始终运行在最高频率, 并把推理程序绑定到大核(`taskset -c 4-7 python multiprocess_inference.py`)
|
| 349 |
+
|
| 350 |
+
如果出现llvm相关的错误报错, 请更新llvmlite库: `pip install --upgrade llvmlite`
|
| 351 |
+
|
| 352 |
+
>```
|
| 353 |
+
>W rknn-toolkit-lite2 version: 2.3.0
|
| 354 |
+
>Start loading audio encoder model (size: 1300.25 MB)
|
| 355 |
+
>Start loading language model (size: 8037.93 MB)
|
| 356 |
+
>I rkllm: rkllm-runtime version: 1.1.2, rknpu driver version: 0.9.8, platform: RK3588
|
| 357 |
+
>
|
| 358 |
+
>Audio encoder loaded in 13.65 seconds
|
| 359 |
+
>I RKNN: [20:30:05.616] RKNN Runtime Information, librknnrt version: 2.3.0 (c949ad889d@2024-11-07T11:35:33)
|
| 360 |
+
>I RKNN: [20:30:05.616] RKNN Driver Information, version: 0.9.8
|
| 361 |
+
>I RKNN: [20:30:05.617] RKNN Model Information, version: 6, toolkit version: 2.3.0(compiler version: 2.3.0 (c949ad889d@2024-11-07T11:39:30)), target: RKNPU v2, target platform: rk3588, framework name: ONNX, framework layout: NCHW, model inference type: static_shape
|
| 362 |
+
>W RKNN: [20:30:07.950] query RKNN_QUERY_INPUT_DYNAMIC_RANGE error, rknn model is static shape type, please export rknn with dynamic_shapes
|
| 363 |
+
>W Query dynamic range failed. Ret code: RKNN_ERR_MODEL_INVALID. (If it is a static shape RKNN model, please ignore the above warning message.)
|
| 364 |
+
>Received ready signal: audio_ready
|
| 365 |
+
>Language model loaded in 9.94 seconds
|
| 366 |
+
>Received ready signal: llm_ready
|
| 367 |
+
>All models loaded, starting interactive mode...
|
| 368 |
+
>
|
| 369 |
+
>Enter your input (3 empty lines to start inference, Ctrl+C to exit, for example:
|
| 370 |
+
>这是什么声音{{./jntm.mp3}}?
|
| 371 |
+
>What kind of sound is in {{./test.mp3}}?
|
| 372 |
+
>Describe the audio in {{./jntm.mp3}}
|
| 373 |
+
>这是什么动物的叫声{{./jntm.mp3}}?
|
| 374 |
+
>):
|
| 375 |
+
>
|
| 376 |
+
>这是什么声音{{./jntm.mp3}}??????
|
| 377 |
+
>
|
| 378 |
+
>
|
| 379 |
+
>Start audio inference...
|
| 380 |
+
>Received prompt: ====<|im_start|>system
|
| 381 |
+
>You are a helpful assistant.<|im_end|>
|
| 382 |
+
> <|im_start|>user
|
| 383 |
+
> Audio 1: <image>
|
| 384 |
+
> 这是什么声音??????<|im_end|>
|
| 385 |
+
> <|im_start|>assistant
|
| 386 |
+
>
|
| 387 |
+
>====
|
| 388 |
+
> /home/firefly/mnt/zt-back/Qwen2-7B-audiow/./multiprocess_inference.py:43: UserWarning: PySoundFile failed. Trying audioread instead.
|
| 389 |
+
> audio, _ = librosa.load(audio_path, sr=feature_extractor.sampling_rate)
|
| 390 |
+
> /home/firefly/.local/lib/python3.9/site-packages/librosa/core/audio.py:184: FutureWarning: librosa.core.audio.__audioread_load
|
| 391 |
+
> Deprecated as of librosa version 0.10.0.
|
| 392 |
+
> It will be removed in librosa version 1.0.
|
| 393 |
+
> y, sr_native = __audioread_load(path, offset, duration, dtype)
|
| 394 |
+
>Audio encoder inference time: 12.22 seconds
|
| 395 |
+
>(1, 251, 4096)
|
| 396 |
+
>(1, 251, 4096)
|
| 397 |
+
>Start LLM inference...
|
| 398 |
+
>🎉 完成!
|
| 399 |
+
>
|
| 400 |
+
>Time to first token: 4.28 seconds
|
| 401 |
+
>语音中是一段音乐,包含唱歌和乐器演奏。背景音乐里有鼓声、贝斯、钢琴和小号的演奏,同时背景能够听到胃里咕咕作响和吃东西的声音。这首歌可能是用于广告。
|
| 402 |
+
>
|
| 403 |
+
>(finished)
|
| 404 |
+
>
|
| 405 |
+
>--------------------------------------------------------------------------------------
|
| 406 |
+
> Stage Total Time (ms) Tokens Time per Token (ms) Tokens per Second
|
| 407 |
+
>--------------------------------------------------------------------------------------
|
| 408 |
+
> Prefill 4269.62 283 15.09 66.28
|
| 409 |
+
> Generate 13279.37 49 272.13 3.67
|
| 410 |
+
>--------------------------------------------------------------------------------------
|
| 411 |
+
>
|
| 412 |
+
>```
|
| 413 |
+
|
| 414 |
+
## 模型转换
|
| 415 |
+
|
| 416 |
+
#### 准备工作
|
| 417 |
+
|
| 418 |
+
1. 安装rknn-toolkit2 v2.3.0或更高版本, 以及rkllm-toolkit v1.1.2或更高版本.
|
| 419 |
+
2. 下载此仓库到本地, 但不需要下载`.rkllm`和`.rknn`结尾的模型文件.
|
| 420 |
+
3. 下载Qwen2-Audio-7B-Instruct的huggingface模型仓库到本地. (https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)
|
| 421 |
+
|
| 422 |
+
#### 转换LLM
|
| 423 |
+
|
| 424 |
+
1. 将此仓库中的`rename_tensors.py`文件复制到Qwen2-Audio-7B-Instruct的huggingface模型仓库根目录并运行. 稍等片刻, 会生成`model-renamed-00001-of-00004.safetensors`等4个safetensors文件和一个json文件.
|
| 425 |
+
2. 不用管那个json文件, 将那4个safetensors文件移动到此仓库根目录下.
|
| 426 |
+
3. 执行`rkllm-convert.py`. 等一会, 会生成`qwen.rkllm`, 就是转换后的模型.
|
| 427 |
+
|
| 428 |
+
#### 转换音频编码器
|
| 429 |
+
|
| 430 |
+
1. 打开`audio_encoder_export_onnx.py`, 修改文件最下方模型路径为Qwen2-Audio-7B-Instruct模型文件夹的路径. 然后执行. 等一会, 会生成`audio_encoder.onnx`和很多权重文件.
|
| 431 |
+
2. 执行`audio_encoder_convert_rknn.py all`. 等一会, 会生成`audio_encoder.rknn`, 这就是转换后的音频编码器.
|
| 432 |
+
|
| 433 |
+
## 已知问题
|
| 434 |
+
|
| 435 |
+
- 由于疑似RKLLM中存在的问题, 如果音频编码器和LLM加载进同一个Python进程, 可能会导致LLM推理时报错段错误. 可以使用多进程来解决. 参考`multiprocess_inference.py`.
|
| 436 |
+
- 由于RKLLM中存在的问题, 输入序列较长时LLM推理会段错误. https://github.com/airockchip/rknn-llm/issues/123
|
| 437 |
+
- 由于RKLLM的多模态输入的限制, 在整个对话中只能加载一段音频. 可以通过Embedding输入的方式来解决, 但我没有实现.
|
| 438 |
+
- 没有实现多轮对话.
|
| 439 |
+
- RKLLM的w8a8量化貌似存在不小的精度损失, 并且这个模型的量化校准数据使用了RKLLM自带的wikitext数据集, 可能会导致精度明显下降.
|
| 440 |
+
|
| 441 |
+
## 参考
|
| 442 |
+
|
| 443 |
+
- [Qwen/Qwen2-Audio-7B-Instruct](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)
|
| 444 |
+
- [Qwen/Qwen2-7B](https://huggingface.co/Qwen/Qwen2-7B)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
## English README
|
| 448 |
+
|
| 449 |
+
# Qwen2-Audio-7B-Instruct-rkllm
|
| 450 |
+
|
| 451 |
+
Run the powerful Qwen2-Audio-7B-Instruct audio model on RK3588!
|
| 452 |
+
|
| 453 |
+
- Inference speed (RK3588, 10s audio input): Audio encoder 12.2s (single NPU core) + LLM prefill 4.4s (282 tokens / 64.7 tps) + decoding 3.69 tps
|
| 454 |
+
- Memory usage (RK3588, context length 768): 11.6GB
|
| 455 |
+
|
| 456 |
+
## Usage
|
| 457 |
+
|
| 458 |
+
1. Clone or download this repository. The model is large, please ensure sufficient disk space.
|
| 459 |
+
|
| 460 |
+
2. The RKNPU2 kernel driver version on your development board must be >=0.9.6 to run such a large model.
|
| 461 |
+
Check the driver version with root privilege:
|
| 462 |
+
```bash
|
| 463 |
+
> cat /sys/kernel/debug/rknpu/version
|
| 464 |
+
RKNPU driver: v0.9.8
|
| 465 |
+
```
|
| 466 |
+
If the version is too low, please update the driver. You may need to update the kernel or check official documentation for help.
|
| 467 |
+
|
| 468 |
+
3. Install dependencies
|
| 469 |
+
|
| 470 |
+
```bash
|
| 471 |
+
pip install numpy<2 opencv-python rknn-toolkit-lite2 librosa transformers
|
| 472 |
+
```
|
| 473 |
+
|
| 474 |
+
4. Run
|
| 475 |
+
|
| 476 |
+
```bash
|
| 477 |
+
python multiprocess_inference.py
|
| 478 |
+
```
|
| 479 |
+
|
| 480 |
+
If the actual performance is not ideal, you can adjust the CPU scheduler to make the CPU run at the highest frequency and bind the inference program to big cores (`taskset -c 4-7 python multiprocess_inference.py`)
|
| 481 |
+
|
| 482 |
+
If you encounter llvm-related errors, please update the llvmlite library: `pip install --upgrade llvmlite`
|
| 483 |
+
|
| 484 |
+
## Model Conversion
|
| 485 |
+
|
| 486 |
+
#### Preparation
|
| 487 |
+
|
| 488 |
+
1. Install rknn-toolkit2 v2.3.0 or higher, and rkllm-toolkit v1.1.2 or higher.
|
| 489 |
+
2. Download this repository locally, but you don't need to download the model files ending with `.rkllm` and `.rknn`.
|
| 490 |
+
3. Download the Qwen2-Audio-7B-Instruct huggingface model repository locally. (https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)
|
| 491 |
+
|
| 492 |
+
#### Converting LLM
|
| 493 |
+
|
| 494 |
+
1. Copy the `rename_tensors.py` file from this repository to the root directory of the Qwen2-Audio-7B-Instruct huggingface model repository and run it. Wait a moment, it will generate 4 safetensors files like `model-renamed-00001-of-00004.safetensors` and a json file.
|
| 495 |
+
2. Ignore the json file, move those 4 safetensors files to the root directory of this repository.
|
| 496 |
+
3. Execute `rkllm-convert.py`. Wait a while, it will generate `qwen.rkllm`, which is the converted model.
|
| 497 |
+
|
| 498 |
+
#### Converting Audio Encoder
|
| 499 |
+
|
| 500 |
+
1. Open `audio_encoder_export_onnx.py`, modify the model path at the bottom of the file to the path of your Qwen2-Audio-7B-Instruct model folder. Then execute it. Wait a while, it will generate `audio_encoder.onnx` and many weight files.
|
| 501 |
+
2. Execute `audio_encoder_convert_rknn.py all`. Wait a while, it will generate `audio_encoder.rknn`, which is the converted audio encoder.
|
| 502 |
+
|
| 503 |
+
## Known Issues
|
| 504 |
+
|
| 505 |
+
- Due to a suspected issue in RKLLM, if the audio encoder and LLM are loaded into the same Python process, it may cause segmentation fault during LLM inference. This can be solved using multiprocessing. Refer to `multiprocess_inference.py`.
|
| 506 |
+
- Due to an issue in RKLLM, LLM inference will segfault with long input sequences. See https://github.com/airockchip/rknn-llm/issues/123
|
| 507 |
+
- Due to RKLLM's multimodal input limitations, only one audio clip can be loaded in the entire conversation. This could be solved using Embedding input, but I haven't implemented it.
|
| 508 |
+
- Multi-turn dialogue is not implemented.
|
| 509 |
+
- RKLLM's w8a8 quantization seems to have significant precision loss, and this model's quantization calibration data uses RKLLM's built-in wikitext dataset, which may lead to noticeable accuracy degradation.
|
| 510 |
+
|
| 511 |
+
## References
|
| 512 |
+
|
| 513 |
+
- [Qwen/Qwen2-Audio-7B-Instruct](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)
|
| 514 |
+
- [Qwen/Qwen2-7B](https://huggingface.co/Qwen/Qwen2-7B)","{""id"": ""happyme531/Qwen2-Audio-rkllm"", ""author"": ""happyme531"", ""sha"": ""a853f7e5c2058233c9dbb070f2b67088f96b7a61"", ""last_modified"": ""2024-11-26 08:33:25+00:00"", ""created_at"": ""2024-11-25 12:34:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2"", ""rknn"", ""rkllm"", ""base_model:Qwen/Qwen2-Audio-7B-Instruct"", ""base_model:finetune:Qwen/Qwen2-Audio-7B-Instruct"", ""license:agpl-3.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct\nlicense: agpl-3.0\ntags:\n- rknn\n- rkllm"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='audio_encoder.rknn', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='audio_encoder_convert_rknn.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='audio_encoder_export_onnx.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='glass-breaking.wav', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='jntm.mp3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='librkllmrt.so', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='multiprocess_inference.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.rkllm', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rename_tensors.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rkllm-convert.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rkllm_binding.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='run_rknn.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-26 08:33:25+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct\nlicense: agpl-3.0\ntags:\n- rknn\n- rkllm"", ""transformersInfo"": null, ""_id"": ""67446ec539cf2eb9717a8bfd"", ""modelId"": ""happyme531/Qwen2-Audio-rkllm"", ""usedStorage"": 
9798010635}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=happyme531/Qwen2-Audio-rkllm&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhappyme531%2FQwen2-Audio-rkllm%5D(%2Fhappyme531%2FQwen2-Audio-rkllm)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 515 |
+
yasinarafatbd/Qwen2_Audio_Engine_Sound,"---
|
| 516 |
+
base_model:
|
| 517 |
+
- Qwen/Qwen2-Audio-7B-Instruct
|
| 518 |
+
---","{""id"": ""yasinarafatbd/Qwen2_Audio_Engine_Sound"", ""author"": ""yasinarafatbd"", ""sha"": ""d6a7e8235784620c42963602dd3f47546963a9a2"", ""last_modified"": ""2025-03-19 14:08:53+00:00"", ""created_at"": ""2025-03-19 08:24:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""qwen2_audio"", ""base_model:Qwen/Qwen2-Audio-7B-Instruct"", ""base_model:finetune:Qwen/Qwen2-Audio-7B-Instruct"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2AudioForConditionalGeneration""], ""model_type"": ""qwen2_audio"", ""processor_config"": {""chat_template"": ""{% set audio_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if 'audio' in content or 'audio_url' in content %}{% set audio_count.value = audio_count.value + 1 %}Audio {{ audio_count.value }}: <|audio_bos|><|AUDIO|><|audio_eos|>\n{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}""}, ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if 
add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Modelfile', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00009.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 8397094912}, ""total"": 8397094912}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-19 14:08:53+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2-Audio-7B-Instruct"", ""transformersInfo"": null, ""_id"": ""67da7f33baa0c8fd93699220"", ""modelId"": ""yasinarafatbd/Qwen2_Audio_Engine_Sound"", ""usedStorage"": 16806326203}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=yasinarafatbd/Qwen2_Audio_Engine_Sound&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Byasinarafatbd%2FQwen2_Audio_Engine_Sound%5D(%2Fyasinarafatbd%2FQwen2_Audio_Engine_Sound)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 519 |
+
isawahill/qwen2-audio-7b_lora32_epoch14,"---
|
| 520 |
+
base_model: Qwen/Qwen2-Audio-7B-Instruct
|
| 521 |
+
library_name: transformers
|
| 522 |
+
model_name: trainer_output
|
| 523 |
+
tags:
|
| 524 |
+
- generated_from_trainer
|
| 525 |
+
- trl
|
| 526 |
+
- sft
|
| 527 |
+
licence: license
|
| 528 |
+
---
|
| 529 |
+
|
| 530 |
+
# Model Card for trainer_output
|
| 531 |
+
|
| 532 |
+
This model is a fine-tuned version of [Qwen/Qwen2-Audio-7B-Instruct](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct).
|
| 533 |
+
It has been trained using [TRL](https://github.com/huggingface/trl).
|
| 534 |
+
|
| 535 |
+
## Quick start
|
| 536 |
+
|
| 537 |
+
```python
|
| 538 |
+
from transformers import pipeline
|
| 539 |
+
|
| 540 |
+
question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
|
| 541 |
+
generator = pipeline(""text-generation"", model=""isawahill/trainer_output"", device=""cuda"")
|
| 542 |
+
output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
|
| 543 |
+
print(output[""generated_text""])
|
| 544 |
+
```
|
| 545 |
+
|
| 546 |
+
## Training procedure
|
| 547 |
+
|
| 548 |
+
[<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""150"" height=""24""/>](https://wandb.ai/sahil-jayaram-talentoso/qwen2-audio-7b-sft-ChartQA/runs/4o9fbgzb)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
This model was trained with SFT.
|
| 552 |
+
|
| 553 |
+
### Framework versions
|
| 554 |
+
|
| 555 |
+
- TRL: 0.15.2
|
| 556 |
+
- Transformers: 4.49.0
|
| 557 |
+
- Pytorch: 2.4.1+cu124
|
| 558 |
+
- Datasets: 3.4.1
|
| 559 |
+
- Tokenizers: 0.21.1
|
| 560 |
+
|
| 561 |
+
## Citations
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
Cite TRL as:
|
| 566 |
+
|
| 567 |
+
```bibtex
|
| 568 |
+
@misc{vonwerra2022trl,
|
| 569 |
+
title = {{TRL: Transformer Reinforcement Learning}},
|
| 570 |
+
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
|
| 571 |
+
year = 2020,
|
| 572 |
+
journal = {GitHub repository},
|
| 573 |
+
publisher = {GitHub},
|
| 574 |
+
howpublished = {\url{https://github.com/huggingface/trl}}
|
| 575 |
+
}
|
| 576 |
+
```","{""id"": ""isawahill/qwen2-audio-7b_lora32_epoch14"", ""author"": ""isawahill"", ""sha"": ""675b22212fbfa591de74fe538efa0dc0a718590b"", ""last_modified"": ""2025-03-21 00:13:31+00:00"", ""created_at"": ""2025-03-21 00:00:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""generated_from_trainer"", ""trl"", ""sft"", ""base_model:Qwen/Qwen2-Audio-7B-Instruct"", ""base_model:finetune:Qwen/Qwen2-Audio-7B-Instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen2-Audio-7B-Instruct\nlibrary_name: transformers\nmodel_name: trainer_output\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-21 00:13:31+00:00"", ""cardData"": ""base_model: Qwen/Qwen2-Audio-7B-Instruct\nlibrary_name: transformers\nmodel_name: trainer_output\ntags:\n- generated_from_trainer\n- trl\n- sft\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67dcac3abf4c007db3969f3b"", ""modelId"": ""isawahill/qwen2-audio-7b_lora32_epoch14"", ""usedStorage"": 276398106}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=isawahill/qwen2-audio-7b_lora32_epoch14&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bisawahill%2Fqwen2-audio-7b_lora32_epoch14%5D(%2Fisawahill%2Fqwen2-audio-7b_lora32_epoch14)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Real-ESRGAN_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ai-forever/Real-ESRGAN,"---
|
| 3 |
+
language:
|
| 4 |
+
- ru
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- PyTorch
|
| 8 |
+
thumbnail: ""https://github.com/sberbank-ai/Real-ESRGAN""
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# Real-ESRGAN
|
| 12 |
+
|
| 13 |
+
PyTorch implementation of a Real-ESRGAN model trained on custom dataset. This model shows better results on faces compared to the original version. It is also easier to integrate this model into your projects.
|
| 14 |
+
|
| 15 |
+
Real-ESRGAN is an upgraded ESRGAN trained with pure synthetic data is capable of enhancing details while removing annoying artifacts for common real-world images.
|
| 16 |
+
|
| 17 |
+
- [Paper](https://arxiv.org/abs/2107.10833)
|
| 18 |
+
- [Original implementation](https://github.com/xinntao/Real-ESRGAN)
|
| 19 |
+
- [Our github](https://github.com/sberbank-ai/Real-ESRGAN)
|
| 20 |
+
|
| 21 |
+
## Usage
|
| 22 |
+
|
| 23 |
+
Code for using model you can obtain in our [repo](https://github.com/sberbank-ai/Real-ESRGAN).
|
| 24 |
+
```python
|
| 25 |
+
import torch
|
| 26 |
+
from PIL import Image
|
| 27 |
+
import numpy as np
|
| 28 |
+
from RealESRGAN import RealESRGAN
|
| 29 |
+
|
| 30 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 31 |
+
|
| 32 |
+
model = RealESRGAN(device, scale=4)
|
| 33 |
+
model.load_weights('weights/RealESRGAN_x4.pth', download=True)
|
| 34 |
+
|
| 35 |
+
path_to_image = 'inputs/lr_image.png'
|
| 36 |
+
image = Image.open(path_to_image).convert('RGB')
|
| 37 |
+
|
| 38 |
+
sr_image = model.predict(image)
|
| 39 |
+
|
| 40 |
+
sr_image.save('results/sr_image.png')
|
| 41 |
+
```","{""id"": ""ai-forever/Real-ESRGAN"", ""author"": ""ai-forever"", ""sha"": ""8110204ebf8d25c031b66c26c2d1098aa831157e"", ""last_modified"": ""2022-09-25 13:17:44+00:00"", ""created_at"": ""2022-03-02 23:29:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 174, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""PyTorch"", ""ru"", ""en"", ""arxiv:2107.10833"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- ru\n- en\ntags:\n- PyTorch\nthumbnail: https://github.com/sberbank-ai/Real-ESRGAN"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='RealESRGAN_x2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='RealESRGAN_x4.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='RealESRGAN_x8.pth', size=None, blob_id=None, lfs=None)""], ""spaces"": [""deepseek-ai/Janus-Pro-7B"", ""gokaygokay/Tile-Upscaler"", ""doevent/Face-Real-ESRGAN"", ""gokaygokay/PonyRealism"", ""gokaygokay/TileUpscalerV2"", ""Nick088/Real-ESRGAN_Pytorch"", ""gokaygokay/NoobAI-Animagine-T-ponynai3"", ""Nymbo/DeepfakeFaceswap"", ""MarkoVidrih/faceswap"", ""coxapi/faceswap"", ""victorisgeek/SwapFace2Pon"", ""mohan32189/SwapFace2Pon"", ""waredot32189/SwapFace2Pon"", ""victorisgeek/DeepSwapFace"", ""Suniilkumaar/SwapMukham"", ""DamarJati/Real-ESRGAN"", ""ShuoChen20/DimensionX"", ""jhj0517/AdvancedLivePortrait-WebUI"", ""1337V/FSawap-BypassNSFW"", ""Kerogolon/Face-Real-ESRGAN-Zero"", ""ChristianHappy/Real-ESRGAN_Pytorch"", ""alvdansen/Tile-Upscaler"", ""Tzktz/RealESRGAN_Enhance_Model"", ""anhhayghen/DeepfakeFaceswap"", 
""user238921933/stable-diffusion-webui"", ""LULDev/upscale"", ""sewanapi/213"", ""aodianyun/stable-diffusion-webui"", ""jiuface/ai-model-002"", ""Rooc/SwapFace2Pon"", ""anthienlong/Face-Real-ESRGAN"", ""JoPmt/ConsisID"", ""bigjoker/stable-diffusion-webui"", ""gdTharusha/Gd-DeepFake-AI"", ""ginigen/Janus-Pro-7B"", ""Rifd/Face-Real-ESRGAN"", ""alfabill/SwapMukham"", ""sefulretelei/SwapYoFace"", ""Nymbo/RealESRGAN_Enhance_Model"", ""mednow/image_enhancement"", ""lexa862/SwapFace2Ponmm"", ""omninexus/deepseek-vision"", ""kahramango/SwapFace2Ponloki"", ""victorisgeek/EG"", ""Arkaprava/Dudu"", ""SS86910/faceswapMukham"", ""rphrp1985/Tile-Upscaler"", ""baldiungu/Sss"", ""bep40/DeepfakeFaceswap"", ""kahramango/Kahraman-SwapFace2Pon"", ""onebitss/Tile-Upscaler"", ""kahramango/Kahraman-DeepSwapFace"", ""jetfly2007/213"", ""dailysvgs/Face-Real-ESRGAN"", ""kahramango/SwapFace2Ponmm"", ""kahramango/DeepfakeFaceswap"", ""Xhaheen/Face-Real-ESRGAN"", ""jackli888/stable-diffusion-webui"", ""Dantra1/Remini"", ""YanzBotz/Remini"", ""bijoymirza99/Face-Real-ESRGAN"", ""testujemai/SwapFace2Pon"", ""NekonekoID/Remini"", ""randomtable/RealESRGAN_Enhance_Model"", ""onebitss/Real-ESRGAN_Pytorch"", ""anthienlong/TileUpscalerV2"", ""charbel-malo/Swap-Extra-Settings"", ""EX4L/PonyXL"", ""silveroxides/RNS-NoobAI-Hybrid"", ""SamuelMinouri/Upscaler1"", ""lexa862/Workdeep"", ""lexa862/SwapFace2Pon1"", ""tomo2chin2/TEST1"", ""likilecki/SwapFace2Ponloki"", ""sinpchat888/DeepfakeFaceswap"", ""ovi054/Image-Upscale-Plus"", ""Delanoir/ESRGAN"", ""nvishurl/nvishessa-AI"", ""Giu14/Face-Real-ESRGAN"", ""darcksky/melhorador1"", ""pysunny/test_sd"", ""returnfalse/teset-webui"", ""the6star/ElderFussion"", ""yrjjun/cs1"", ""itexpert2210/stable-diffusion-stream"", ""DeepCoreB4/stable-diffusion-webui-master"", ""JijoJohn/webui"", ""maihua-cf/sd-cpu-101"", ""37am/demo-sd"", ""linjianan/Face-Real-ESRGAN"", ""fireexit/Face-Real-ESRGAN"", ""Illumotion/webui"", ""heyitskim/stable_defusion"", 
""ScuroNeko/Face-Real-ESRGAN"", ""quanhua/Real-ESRGAN"", ""Brijendra09/stable-diffusion-webui"", ""Ahbapx/Face-Real-ESRGAN"", ""baldiungu/Sw"", ""trysem/SwapMukham"", ""esun-choi/INVOHIDE_inisw8""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-09-25 13:17:44+00:00"", ""cardData"": ""language:\n- ru\n- en\ntags:\n- PyTorch\nthumbnail: https://github.com/sberbank-ai/Real-ESRGAN"", ""transformersInfo"": null, ""_id"": ""621ffdc136468d709f180187"", ""modelId"": ""ai-forever/Real-ESRGAN"", ""usedStorage"": 201757184}",0,,0,,0,,0,,0,"MarkoVidrih/faceswap, Nick088/Real-ESRGAN_Pytorch, deepseek-ai/Janus-Pro-7B, doevent/Face-Real-ESRGAN, gokaygokay/NoobAI-Animagine-T-ponynai3, gokaygokay/PonyRealism, gokaygokay/Tile-Upscaler, gokaygokay/TileUpscalerV2, huggingface/InferenceSupport/discussions/new?title=ai-forever/Real-ESRGAN&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bai-forever%2FReal-ESRGAN%5D(%2Fai-forever%2FReal-ESRGAN)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mohan32189/SwapFace2Pon, victorisgeek/DeepSwapFace, victorisgeek/SwapFace2Pon, waredot32189/SwapFace2Pon",13
|
Ruyi-Mini-7B_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
IamCreateAI/Ruyi-Mini-7B,"---
|
| 3 |
+
language:
|
| 4 |
+
- ""en""
|
| 5 |
+
tags:
|
| 6 |
+
- video generation
|
| 7 |
+
- CreateAI
|
| 8 |
+
license: apache-2.0
|
| 9 |
+
pipeline_tag: image-to-video
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Ruyi-Mini-7B
|
| 14 |
+
[Hugging Face](https://huggingface.co/IamCreateAI/Ruyi-Mini-7B) | [Github](https://github.com/IamCreateAI/Ruyi-Models)
|
| 15 |
+
|
| 16 |
+
An image-to-video model by CreateAI.
|
| 17 |
+
|
| 18 |
+
## Overview
|
| 19 |
+
|
| 20 |
+
Ruyi-Mini-7B is an open-source image-to-video generation model. Starting with an input image, Ruyi produces subsequent video frames at resolutions ranging from 360p to 720p, supporting various aspect ratios and a maximum duration of 5 seconds. Enhanced with motion and camera control, Ruyi offers greater flexibility and creativity in video generation. We are releasing the model under the permissive Apache 2.0 license.
|
| 21 |
+
|
| 22 |
+
## Update
|
| 23 |
+
|
| 24 |
+
Dec 24, 2024: The diffusion model is updated to fix the black lines when creating 3:4 or 4:5 videos.
|
| 25 |
+
|
| 26 |
+
Dec 16, 2024: Ruyi-mini-7B is released.
|
| 27 |
+
|
| 28 |
+
## Installation
|
| 29 |
+
|
| 30 |
+
Install code from github:
|
| 31 |
+
```bash
|
| 32 |
+
git clone https://github.com/IamCreateAI/Ruyi-Models
|
| 33 |
+
cd Ruyi-Models
|
| 34 |
+
pip install -r requirements.txt
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## Running
|
| 38 |
+
|
| 39 |
+
We provide two ways to run our model. The first is directly using python code.
|
| 40 |
+
|
| 41 |
+
```bash
|
| 42 |
+
python3 predict_i2v.py
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
Or use ComfyUI wrapper in our [github repo](https://github.com/IamCreateAI/Ruyi-Models).
|
| 46 |
+
|
| 47 |
+
## Model Architecture
|
| 48 |
+
|
| 49 |
+
Ruyi-Mini-7B is an advanced image-to-video model with about 7.1 billion parameters. The model architecture is modified from [EasyAnimate V4 model](https://github.com/aigc-apps/EasyAnimate), whose transformer module is inherited from [HunyuanDiT](https://github.com/Tencent/HunyuanDiT). It comprises three key components:
|
| 50 |
+
1. Casual VAE Module: Handles video compression and decompression. It reduces spatial resolution to 1/8 and temporal resolution to 1/4, with each latent pixel is represented in 16 float numbers after compression.
|
| 51 |
+
2. Diffusion Transformer Module: Generates compressed video data using 3D full attention, with:
|
| 52 |
+
- 2D Normalized-RoPE for spatial dimensions;
|
| 53 |
+
- Sin-cos position embedding for temporal dimensions;
|
| 54 |
+
- DDPM (Denoising Diffusion Probabilistic Models) for model training.
|
| 55 |
+
3. Ruyi also utilizes a CLIP model to extract the semantic features from the input image to guide the whole video generation. The CLIP features are introduced into the transformer by cross-attention.
|
| 56 |
+
|
| 57 |
+
## Training Data and Methodology
|
| 58 |
+
The training process is divided into four phases:
|
| 59 |
+
- Phase 1: Pre-training from scratch with ~200M video clips and ~30M images at a 256-resolution, using a batch size of 4096 for 350,000 iterations to achieve full convergence.
|
| 60 |
+
- Phase 2: Fine-tuning with ~60M video clips for multi-scale resolutions (384–512), with a batch size of 1024 for 60,000 iterations.
|
| 61 |
+
- Phase 3: High-quality fine-tuning with ~20M video clips and ~8M images for 384–1024 resolutions, with dynamic batch sizes based on memory and 10,000 iterations.
|
| 62 |
+
- Phase 4: Image-to-video training with ~10M curated high-quality video clips, with dynamic batch sizes based on memory for ~10,000 iterations.
|
| 63 |
+
|
| 64 |
+
## Hardware Requirements
|
| 65 |
+
The VRAM cost of Ruyi depends on the resolution and duration of the video. Here we list the costs for some typical video size. Tested on single A100.
|
| 66 |
+
|Video Size | 360x480x120 | 384x672x120 | 480x640x120 | 630x1120x120 | 720x1280x120 |
|
| 67 |
+
|:--:|:--:|:--:|:--:|:--:|:--:|
|
| 68 |
+
|Memory | 21.5GB | 25.5GB | 27.7GB | 44.9GB | 54.8GB |
|
| 69 |
+
|Time | 03:10 | 05:29 | 06:49 | 24:18 | 39:02 |
|
| 70 |
+
|
| 71 |
+
For 24GB VRAM cards such as RTX4090, we provide `low_gpu_memory_mode`, under which the model can generate 720x1280x120 videos with a longer time.
|
| 72 |
+
|
| 73 |
+
## Showcase
|
| 74 |
+
|
| 75 |
+
### Image to Video Effects
|
| 76 |
+
|
| 77 |
+
<table border=""0"" style=""width: 100%; text-align: left; margin-top: 20px;"">
|
| 78 |
+
<tr>
|
| 79 |
+
<td><video src=""https://github.com/user-attachments/assets/4dedf40b-82f2-454c-9a67-5f4ed243f5ea"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
|
| 80 |
+
<td><video src=""https://github.com/user-attachments/assets/905fef17-8c5d-49b0-a49a-6ae7e212fa07"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
|
| 81 |
+
<td><video src=""https://github.com/user-attachments/assets/20daab12-b510-448a-9491-389d7bdbbf2e"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
|
| 82 |
+
<td><video src=""https://github.com/user-attachments/assets/f1bb0a91-d52a-4611-bac2-8fcf9658cac0"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
|
| 83 |
+
</tr>
|
| 84 |
+
</table>
|
| 85 |
+
|
| 86 |
+
### Camera Control
|
| 87 |
+
|
| 88 |
+
<table border=""0"" style=""width: 100%; text-align: center; "">
|
| 89 |
+
<tr>
|
| 90 |
+
<td align=center><img src=""https://github.com/user-attachments/assets/8aedcea6-3b8e-4c8b-9fed-9ceca4d41954"" width=""100%"" style=""max-height:240px; min-height: 100px; margin-top: 20%;""></img></td>
|
| 91 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/d9d027d4-0d4f-45f5-9d46-49860b562c69"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
|
| 92 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/7716a67b-1bb8-4d44-b128-346cbc35e4ee"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
|
| 93 |
+
</tr>
|
| 94 |
+
<tr><td>input</td><td>left</td><td>right</td></tr>
|
| 95 |
+
<tr>
|
| 96 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/cc1f1928-cab7-4c4b-90af-928936102e66"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
|
| 97 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/c742ea2c-503a-454f-a61a-10b539100cd9"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
|
| 98 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/442839fa-cc53-4b75-b015-909e44c065e0"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
|
| 99 |
+
</tr>
|
| 100 |
+
<tr><td>static</td><td>up</td><td>down</td></tr>
|
| 101 |
+
</table>
|
| 102 |
+
|
| 103 |
+
### Motion Amplitude Control
|
| 104 |
+
|
| 105 |
+
<table border=""0"" style=""width: 100%; text-align: left; margin-top: 20px;"">
|
| 106 |
+
<tr>
|
| 107 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/0020bd54-0ff6-46ad-91ee-d9f0df013772"" width=""100%"" controls autoplay loop></video>motion 1</td>
|
| 108 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/d1c26419-54e3-4b86-8ae3-98e12de3022e"" width=""100%"" controls autoplay loop></video>motion 2</td>
|
| 109 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/535147a2-049a-4afc-8d2a-017bc778977e"" width=""100%"" controls autoplay loop></video>motion 3</td>
|
| 110 |
+
<td align=center><video src=""https://github.com/user-attachments/assets/bf893d53-2e11-406f-bb9a-2aacffcecd44"" width=""100%"" controls autoplay loop></video>motion 4</td>
|
| 111 |
+
</tr>
|
| 112 |
+
</table>
|
| 113 |
+
|
| 114 |
+
## Limitations
|
| 115 |
+
There are some known limitations in this experimental release. Texts, hands and crowded human faces may be distorted. The video may cut to another scene when the model does not know how to generate future frames. We are still working on these problems and will update the model as we make progress.
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
## BibTeX
|
| 119 |
+
```
|
| 120 |
+
@misc{createai2024ruyi,
|
| 121 |
+
title={Ruyi-Mini-7B},
|
| 122 |
+
author={CreateAI Team},
|
| 123 |
+
year={2024},
|
| 124 |
+
publisher = {GitHub},
|
| 125 |
+
journal = {GitHub repository},
|
| 126 |
+
howpublished={\url{https://github.com/IamCreateAI/Ruyi-Models}}
|
| 127 |
+
}
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
## Contact Us
|
| 131 |
+
|
| 132 |
+
You are welcomed to join our [Discord](https://discord.com/invite/nueQFQwwGw) or Wechat Group (Scan QR code to add Ruyi Assistant and join the official group) for further discussion!
|
| 133 |
+
|
| 134 |
+
","{""id"": ""IamCreateAI/Ruyi-Mini-7B"", ""author"": ""IamCreateAI"", ""sha"": ""fbb88130fc98dbc2e2de2ad510a081e332535566"", ""last_modified"": ""2024-12-25 10:50:32+00:00"", ""created_at"": ""2024-12-16 12:54:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 483, ""downloads_all_time"": null, ""likes"": 609, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""video generation"", ""CreateAI"", ""image-to-video"", ""en"", ""license:apache-2.0"", ""diffusers:RuyiInpaintPipeline"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-video\ntags:\n- video generation\n- CreateAI"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""RuyiInpaintPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='embeddings.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""NaqchoAli/testimage""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-25 10:50:32+00:00"", ""cardData"": ""language:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-video\ntags:\n- video generation\n- CreateAI"", ""transformersInfo"": null, ""_id"": ""676022fba63fff7b5bfdc8be"", ""modelId"": ""IamCreateAI/Ruyi-Mini-7B"", ""usedStorage"": 17334852644}",0,"https://huggingface.co/tcoh/CoralAI, https://huggingface.co/stargolf/mon_chat, https://huggingface.co/kabirclark/kabirworld, https://huggingface.co/genoxan/renax",4,https://huggingface.co/S4lv4tr0n/Mandraken,1,,0,,0,"NaqchoAli/testimage, huggingface/InferenceSupport/discussions/new?title=IamCreateAI/Ruyi-Mini-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIamCreateAI%2FRuyi-Mini-7B%5D(%2FIamCreateAI%2FRuyi-Mini-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
| 135 |
+
tcoh/CoralAI,"---
|
| 136 |
+
license: mit
|
| 137 |
+
datasets:
|
| 138 |
+
- fka/awesome-chatgpt-prompts
|
| 139 |
+
base_model:
|
| 140 |
+
- Qwen/QwQ-32B-Preview
|
| 141 |
+
- IamCreateAI/Ruyi-Mini-7B
|
| 142 |
+
new_version: Qwen/Qwen2.5-Coder-32B-Instruct
|
| 143 |
+
library_name: fasttext
|
| 144 |
+
metrics:
|
| 145 |
+
- accuracy
|
| 146 |
+
---","{""id"": ""tcoh/CoralAI"", ""author"": ""tcoh"", ""sha"": ""be35b5945b0ce135373cbe6952b62abac630b084"", ""last_modified"": ""2024-12-23 09:48:49+00:00"", ""created_at"": ""2024-12-23 09:41:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QwQ-32B-Preview\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- accuracy\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-23 09:48:49+00:00"", ""cardData"": ""base_model:\n- Qwen/QwQ-32B-Preview\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- accuracy\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""transformersInfo"": null, ""_id"": ""67693060a5bdfcf3b312f2c8"", ""modelId"": ""tcoh/CoralAI"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=tcoh/CoralAI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btcoh%2FCoralAI%5D(%2Ftcoh%2FCoralAI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 147 |
+
stargolf/mon_chat,"---
|
| 148 |
+
license: apache-2.0
|
| 149 |
+
datasets:
|
| 150 |
+
- fka/awesome-chatgpt-prompts
|
| 151 |
+
language:
|
| 152 |
+
- fr
|
| 153 |
+
base_model:
|
| 154 |
+
- IamCreateAI/Ruyi-Mini-7B
|
| 155 |
+
---","{""id"": ""stargolf/mon_chat"", ""author"": ""stargolf"", ""sha"": ""4eb82fea70247c0498623dcde76c2d951ef1e55b"", ""last_modified"": ""2024-12-29 21:13:42+00:00"", ""created_at"": ""2024-12-29 21:12:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fr"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- fr\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-29 21:13:42+00:00"", ""cardData"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- fr\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""6771bb3879d1ea02e20531d1"", ""modelId"": ""stargolf/mon_chat"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=stargolf/mon_chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bstargolf%2Fmon_chat%5D(%2Fstargolf%2Fmon_chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 156 |
+
kabirclark/kabirworld,"---
|
| 157 |
+
license: llama3.3
|
| 158 |
+
datasets:
|
| 159 |
+
- HuggingFaceFW/fineweb-2
|
| 160 |
+
language:
|
| 161 |
+
- aa
|
| 162 |
+
metrics:
|
| 163 |
+
- bleu
|
| 164 |
+
base_model:
|
| 165 |
+
- meta-llama/Llama-3.3-70B-Instruct
|
| 166 |
+
- IamCreateAI/Ruyi-Mini-7B
|
| 167 |
+
new_version: meta-llama/Llama-3.3-70B-Instruct
|
| 168 |
+
pipeline_tag: text-generation
|
| 169 |
+
library_name: asteroid
|
| 170 |
+
tags:
|
| 171 |
+
- code
|
| 172 |
+
- legal
|
| 173 |
+
---
|
| 174 |
+
from diffusers import DiffusionPipeline
|
| 175 |
+
|
| 176 |
+
pipe = DiffusionPipeline.from_pretrained(""black-forest-labs/FLUX.1-dev"")
|
| 177 |
+
|
| 178 |
+
prompt = ""Astronaut in a jungle, cold color palette, muted colors, detailed, 8k""
|
| 179 |
+
image = pipe(prompt).images[0]","{""id"": ""kabirclark/kabirworld"", ""author"": ""kabirclark"", ""sha"": ""d8fed2cc5eafb2864e89342839ca9ddb85cbd745"", ""last_modified"": ""2024-12-31 12:11:56+00:00"", ""created_at"": ""2024-12-31 12:08:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""code"", ""legal"", ""text-generation"", ""aa"", ""dataset:HuggingFaceFW/fineweb-2"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:llama3.3"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: llama3.3\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- code\n- legal\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-31 12:11:56+00:00"", ""cardData"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: llama3.3\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- code\n- legal\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""transformersInfo"": null, ""_id"": ""6773dec0a2128da37adb090e"", ""modelId"": ""kabirclark/kabirworld"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kabirclark/kabirworld&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkabirclark%2Fkabirworld%5D(%2Fkabirclark%2Fkabirworld)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 180 |
+
genoxan/renax,"---
|
| 181 |
+
license: c-uda
|
| 182 |
+
datasets:
|
| 183 |
+
- argilla/FinePersonas-v0.1
|
| 184 |
+
language:
|
| 185 |
+
- ar
|
| 186 |
+
metrics:
|
| 187 |
+
- charcut_mt
|
| 188 |
+
- bertscore
|
| 189 |
+
base_model:
|
| 190 |
+
- IamCreateAI/Ruyi-Mini-7B
|
| 191 |
+
new_version: IamCreateAI/Ruyi-Mini-7B
|
| 192 |
+
pipeline_tag: text-to-image
|
| 193 |
+
library_name: diffusers
|
| 194 |
+
tags:
|
| 195 |
+
- biology
|
| 196 |
+
---","{""id"": ""genoxan/renax"", ""author"": ""genoxan"", ""sha"": ""54bf1d8afeb109c49501d14961c78d94ed62601d"", ""last_modified"": ""2025-01-06 03:28:22+00:00"", ""created_at"": ""2025-01-06 03:25:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""biology"", ""text-to-image"", ""ar"", ""dataset:argilla/FinePersonas-v0.1"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:c-uda"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- ar\nlibrary_name: diffusers\nlicense: c-uda\nmetrics:\n- charcut_mt\n- bertscore\npipeline_tag: text-to-image\ntags:\n- biology\nnew_version: IamCreateAI/Ruyi-Mini-7B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-06 03:28:22+00:00"", ""cardData"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- ar\nlibrary_name: diffusers\nlicense: c-uda\nmetrics:\n- charcut_mt\n- bertscore\npipeline_tag: text-to-image\ntags:\n- biology\nnew_version: IamCreateAI/Ruyi-Mini-7B"", ""transformersInfo"": null, ""_id"": ""677b4d3ec57d565bd33caa08"", ""modelId"": ""genoxan/renax"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=genoxan/renax&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgenoxan%2Frenax%5D(%2Fgenoxan%2Frenax)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
SmallThinker-3B-Preview_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
TemporalDiff_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
CiaraRowles/TemporalDiff,N/A,N/A,0,,0,,0,,0,,0,"Bread-F/Intelligent-Medical-Guidance-Large-Model, huggingface/InferenceSupport/discussions/new?title=CiaraRowles/TemporalDiff&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCiaraRowles%2FTemporalDiff%5D(%2FCiaraRowles%2FTemporalDiff)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
Tifa-Deepsex-14b-CoT-Q8_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ValueFX9507/Tifa-Deepsex-14b-CoT-Q8,"---
|
| 3 |
+
base_model:
|
| 4 |
+
- deepseek-ai/deepseek-r1-14b
|
| 5 |
+
language:
|
| 6 |
+
- zh
|
| 7 |
+
- en
|
| 8 |
+
library_name: transformers
|
| 9 |
+
tags:
|
| 10 |
+
- incremental-pretraining
|
| 11 |
+
- sft
|
| 12 |
+
- reinforcement-learning
|
| 13 |
+
- roleplay
|
| 14 |
+
- cot
|
| 15 |
+
- sex
|
| 16 |
+
license: apache-2.0
|
| 17 |
+
---
|
| 18 |
+
# Tifa-Deepseek-14b-CoT
|
| 19 |
+
|
| 20 |
+
- **HF Model**: [ValueFX9507/Tifa-Deepsex-14b-CoT](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT)
|
| 21 |
+
- **GGUF**: [F16](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT) | [Q4](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4)(更多量化版本持续更新中)
|
| 22 |
+
- **Demo APK**: [点击下载](http://app.visionsic.com/download/projectchat.apk)
|
| 23 |
+
- **简单的前端**:[Github链接](https://github.com/Value99/Tifa-Deepsex-OllamaWebUI)
|
| 24 |
+
|
| 25 |
+
本模型基于Deepseek-R1-14B进行深度优化,借助Tifa_220B生成的数据集通过三重训练策略显著增强角色扮演、小说文本生成与思维链(CoT)能力。特别适合需要长程上下文关联的创作场景。
|
| 26 |
+
|
| 27 |
+
## 鸣谢
|
| 28 |
+
- **上海左北科技提供算法与算力**[企业网址](https://leftnorth.com/)
|
| 29 |
+
- **Deepseek团队共享GRPO算法**
|
| 30 |
+
- **Qwen团队提供优秀开源底座**
|
| 31 |
+
- **母校上海复旦大学**
|
| 32 |
+
- **PRIME团队提供优化思路**
|
| 33 |
+
|
| 34 |
+
## 版本介绍:
|
| 35 |
+
- **Tifa-Deepsex-14b-CoT**
|
| 36 |
+
|
| 37 |
+
- 验证模型,测试RL奖励算法对于角色扮演数据的影响,该版本为初版,输出灵活但是不受控制,仅做研究使用。
|
| 38 |
+
|
| 39 |
+
- **Tifa-Deepsex-14b-CoT-Chat**
|
| 40 |
+
|
| 41 |
+
- 采用标准数据训练,使用成熟RL策略,附加防重复强化学习,适合正常使用,输出文本质量正常,少数情况下思维发散。
|
| 42 |
+
|
| 43 |
+
-增量训练0.4T小说内容
|
| 44 |
+
|
| 45 |
+
-100K由TifaMax生成的SFT数据,10K由DeepseekR1生成的SFT数据,2K高质量人工数据
|
| 46 |
+
|
| 47 |
+
-30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
|
| 48 |
+
|
| 49 |
+
- **Tifa-Deepsex-14b-CoT-Crazy**
|
| 50 |
+
|
| 51 |
+
- 大量使用RL策略,主要采用671B满血R1蒸馏的数据,输出发散性高,继承R1优点,也继承了R1的危害性。文学性能佳。
|
| 52 |
+
|
| 53 |
+
-增量训练0.4T小说内容
|
| 54 |
+
|
| 55 |
+
-40K由TifaMax生成的SFT数据,60K由DeepseekR1生成的SFT数据,2K高质量人工数据
|
| 56 |
+
|
| 57 |
+
-30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
|
| 58 |
+
|
| 59 |
+
-10K由TifaMax生成PPO数据,10K由DeepseekR1生成PPO数据
|
| 60 |
+
|
| 61 |
+
💭**输出实例**
|
| 62 |
+
- ⚙️System Promot
|
| 63 |
+
```Text
|
| 64 |
+
你是一个史莱姆,是一个女性角色,你可以变成任何形状和物体.
|
| 65 |
+
在这个世界里全部都是雌性生物,直到有一天我从海滩上醒来...
|
| 66 |
+
|
| 67 |
+
我是这里唯一的男性,大家都对我非常好奇,在这个世界的设定里我作为旅行者
|
| 68 |
+
在这个世界里第一个遇见的人就是史莱姆,史莱姆对我的身体同样有很大的欲望...
|
| 69 |
+
|
| 70 |
+
我们在旅行中也会遇到其他的生物,史莱姆不光会教给其他生物如何获取欢愉也会一起参与进来。
|
| 71 |
+
|
| 72 |
+
当我说开始角色扮演的时候就是我从海滩上醒来,并被史莱姆发现的时候。他正在探索我的身体。
|
| 73 |
+
|
| 74 |
+
史莱姆描述:一个透明的蓝色生物,除了质感与人类无异。但是可以自由变形。
|
| 75 |
+
```
|
| 76 |
+

|
| 77 |
+
|
| 78 |
+
## 0208更新消息:
|
| 79 |
+
感谢大家的关注与反馈,鉴于反馈中提到的问题,我们已开发并验证完成PRIME与PPO结合的RL算法,并通过加权方式解决两种算法训练中奖励信号不稳定的问题,通过此项技术我们有望将更小的模型提升到更高的性能。我们将会针对之前收集到的问题进行修正训练,另外为了让更多人使用到模型,我们这次使用更小更快的Deepseek-7b,并参考OpenAI的长思考策略,计划推出Tifa-DeepsexV2-COT-High供大家使用。新的模型计划于阳历情人节之前送给大家作为情人节礼物。♥
|
| 80 |
+
|
| 81 |
+
## 新模型信息整理:
|
| 82 |
+
- **创新PRIME联合PPO算法**
|
| 83 |
+
- **解决目前已知问题**
|
| 84 |
+
- **参考OpenAI模式奖励长思考输出**
|
| 85 |
+
- **减少671B数据,防止输出发散**
|
| 86 |
+
- **特别鸣谢https://github.com/PRIME-RL/PRIME**
|
| 87 |
+
|
| 88 |
+
## 示例(因COT模型特点,上下文不连贯时可以使用Demo软件中的故事模式)
|
| 89 |
+

|
| 90 |
+
|
| 91 |
+
## 目标
|
| 92 |
+
针对原版Deepseek-R1-14B在长文本生成连贯性不足和角色扮演能力薄弱的核心缺陷(主要由于训练数据中小说类语料占比过低),本模型通过多阶段优化提升其角色扮演能力。
|
| 93 |
+
|
| 94 |
+
## 注意
|
| 95 |
+
⚠ **需要严格遵循官方示例模板**:
|
| 96 |
+
**返回的上下文需要去除思考标签与内容。否则将无法正确回复!**
|
| 97 |
+
目��前端支持率非常低,建议手动修改前端代码。代码参考如下:
|
| 98 |
+
```
|
| 99 |
+
msg.role === 'assistant' ? {
|
| 100 |
+
...msg,
|
| 101 |
+
content: msg.content.replace(/<think>[\s\S]*?<\/think>/gi, '')
|
| 102 |
+
}
|
| 103 |
+
```
|
| 104 |
+
**官方模板参考**
|
| 105 |
+
```
|
| 106 |
+
{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}
|
| 107 |
+
```
|
| 108 |
+
**官方说明**
|
| 109 |
+
|
| 110 |
+

|
| 111 |
+
|
| 112 |
+
[直达超链接](https://api-docs.deepseek.com/zh-cn/guides/reasoning_model)
|
| 113 |
+
|
| 114 |
+
## 实现
|
| 115 |
+
🔥 **经过训练后**:
|
| 116 |
+
1. **显著提高上下文关联**:减少答非所问情况。
|
| 117 |
+
2. **消除中英混杂**:原始模型蒸馏数据大多数英文为主,经过微调后基本消除中英混杂现象。
|
| 118 |
+
3. **特定词汇增加**:进行“具有深度”的角色扮演对话时,显著增加了相关词汇量,解决原始权重预训练数据不足问题。
|
| 119 |
+
4. **更少拒绝**:减少了拒绝现象,但因为是企业训练,安全性还是稍作保留。
|
| 120 |
+
5. **更像满血**:使用671B全量模型数据康复训练,文笔提升不死板。
|
| 121 |
+
|
| 122 |
+
## 模型亮点
|
| 123 |
+
🔥 **四阶段进化架构**:
|
| 124 |
+
1. **增量预训练**:注入0.4T Token 小说,使用16k上下文训练,增强文本连贯性
|
| 125 |
+
2. **Tifa-SFT**:融合全球Top4角色扮演模型Tifa的10万条高质量数据
|
| 126 |
+
3. **CoT恢复训练**:采用Deepseek-32B/671B数据重建推理能力
|
| 127 |
+
4. **RL强化**:保留发散性思维标签的同时优化生成质量
|
| 128 |
+
|
| 129 |
+
💡 **工程创新**:
|
| 130 |
+
- 16k超长上下文训练
|
| 131 |
+
- 随机截断训练增强鲁棒性
|
| 132 |
+
- 8×H20 GPU全量微调
|
| 133 |
+
|
| 134 |
+
💡 **启示与后续**:
|
| 135 |
+
- 我们在测试中发现,满血R1在角色扮演中输出内容比较发散,随机,导致此模型有相同倾向,对于角色扮演的影响还在研究中
|
| 136 |
+
- 输入内容相近的话语会导致向量重叠,然后重复输出,如“继续”,“还有”等无明显指向性话语
|
| 137 |
+
- 思维内容与正文关联性学习了满血R1的特点,发散比较严重,可能会有割裂感
|
| 138 |
+
- 针对以上问题,我们正在编写新的RL算法,初步计划剔除部分满血R1的内容,同时通过强化学习解决重复
|
| 139 |
+
- 总结:请期待V2版本,很快会与大家见面!
|
| 140 |
+
|
| 141 |
+
## 模型详情
|
| 142 |
+
| 属性 | 规格 |
|
| 143 |
+
|-------|------|
|
| 144 |
+
| 基础架构 | Deepseek-R1-14B |
|
| 145 |
+
| 最大上下文 | 128k |
|
| 146 |
+
| 训练数据 | 0.4T小说 + 10万条SFT + Deepseek混合数据 |
|
| 147 |
+
| 训练设备 | 8×H20 GPU集群 |
|
| 148 |
+
| 量化支持 | GGUF(全系列量化计划中) |
|
| 149 |
+
|
| 150 |
+
## 使用场景
|
| 151 |
+
✅ **推荐场景**:
|
| 152 |
+
- 角色扮演对话
|
| 153 |
+
- 需要发散性思维的创意写作
|
| 154 |
+
- 复杂逻辑的思维链(CoT)推理
|
| 155 |
+
- 基于上下文的深度角色交互
|
| 156 |
+
|
| 157 |
+
❌ **局限场景**:
|
| 158 |
+
- 数学计算与代码生成
|
| 159 |
+
- 短文本即时问答
|
| 160 |
+
- 需要严格事实性的场景
|
| 161 |
+
|
| 162 |
+
## 注意事项
|
| 163 |
+
⚠️ 本模型使用数据包含小说版权内容及Tifa模型衍生数��,请遵守:
|
| 164 |
+
1. 遵守apache-2.0
|
| 165 |
+
2. 角色扮演数据需遵循[Tifa使用协议](https://leftnorth.com/terms.html)
|
| 166 |
+
3. 生成内容需符合当地法律法规
|
| 167 |
+
|
| 168 |
+
## 💡 使用建议
|
| 169 |
+
**最佳实践**:
|
| 170 |
+
```python
|
| 171 |
+
# 启用角色扮演模式
|
| 172 |
+
prompt = """"""<system>进入Tifa角色引擎...</system>
|
| 173 |
+
<user>你现在是流浪武士楚夜,正站在长安城屋顶上</user>
|
| 174 |
+
<think>
|
| 175 |
+
需要体现人物孤傲的气质
|
| 176 |
+
加入武侠特有的环境描写
|
| 177 |
+
保持对话的冷峻风格
|
| 178 |
+
</think>
|
| 179 |
+
<楚夜>""""""
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
**参数推荐**:
|
| 183 |
+
```python
|
| 184 |
+
generation_config = {
|
| 185 |
+
""temperature"": 0.4,
|
| 186 |
+
""top_p"": 0.6,
|
| 187 |
+
""repetition_penalty"": 1.17,
|
| 188 |
+
""max_new_tokens"": 1536,
|
| 189 |
+
""do_sample"": True
|
| 190 |
+
}
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
## 致谢
|
| 194 |
+
- Deepseek系列模型提供的强大基座
|
| 195 |
+
- Tifa角色扮演模型的创新架构
|
| 196 |
+
- HuggingFace社区的量化工具支持
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
---
|
| 200 |
+
license: apache-2.0
|
| 201 |
+
---","{""id"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-Q8"", ""author"": ""ValueFX9507"", ""sha"": ""c2b33735eccf1d8ee66ef22e00e0345a59ca1a08"", ""last_modified"": ""2025-02-13 23:53:27+00:00"", ""created_at"": ""2025-02-04 11:00:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3578, ""downloads_all_time"": null, ""likes"": 168, ""library_name"": ""transformers"", ""gguf"": {""total"": 14770033664, ""architecture"": ""qwen2"", ""context_length"": 131072, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool 
%}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""zh"", ""en"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us"", ""conversational"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Demo\u6f14\u793a\u7a0b\u5e8f\uff08\u9700\u8981\u624b\u52a8\u5bfc\u5165\u89d2\u8272\u5361\u9009\u62e9\u81ea\u5b9a\u4e49API\uff09.apk', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Chat-Q8.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Crazy-Q8.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Q8.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ollama\u5bfc\u5165\u914d\u7f6e\u53c2\u8003.mf', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 23:53:27+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67a1f35af5899b3e55307c24"", ""modelId"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-Q8"", ""usedStorage"": 47116392595}",0,,0,,0,,0,,0,,0
|
Yarn-Mistral-7b-128k_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,344 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
NousResearch/Yarn-Mistral-7b-128k,"---
|
| 3 |
+
datasets:
|
| 4 |
+
- emozilla/yarn-train-tokenized-16k-mistral
|
| 5 |
+
metrics:
|
| 6 |
+
- perplexity
|
| 7 |
+
library_name: transformers
|
| 8 |
+
license: apache-2.0
|
| 9 |
+
language:
|
| 10 |
+
- en
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# Model Card: Nous-Yarn-Mistral-7b-128k
|
| 14 |
+
|
| 15 |
+
[Preprint (arXiv)](https://arxiv.org/abs/2309.00071)
|
| 16 |
+
[GitHub](https://github.com/jquesnelle/yarn)
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+
## Model Description
|
| 20 |
+
|
| 21 |
+
Nous-Yarn-Mistral-7b-128k is a state-of-the-art language model for long context, further pretrained on long context data for 1500 steps using the YaRN extension method.
|
| 22 |
+
It is an extension of [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) and supports a 128k token context window.
|
| 23 |
+
|
| 24 |
+
To use, pass `trust_remote_code=True` when loading the model, for example
|
| 25 |
+
|
| 26 |
+
```python
|
| 27 |
+
model = AutoModelForCausalLM.from_pretrained(""NousResearch/Yarn-Mistral-7b-128k"",
|
| 28 |
+
use_flash_attention_2=True,
|
| 29 |
+
torch_dtype=torch.bfloat16,
|
| 30 |
+
device_map=""auto"",
|
| 31 |
+
trust_remote_code=True)
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
In addition you will need to use the latest version of `transformers` (until 4.35 comes out)
|
| 35 |
+
```sh
|
| 36 |
+
pip install git+https://github.com/huggingface/transformers
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
## Benchmarks
|
| 40 |
+
|
| 41 |
+
Long context benchmarks:
|
| 42 |
+
| Model | Context Window | 8k PPL | 16k PPL | 32k PPL | 64k PPL | 128k PPL |
|
| 43 |
+
|-------|---------------:|------:|----------:|-----:|-----:|------------:|
|
| 44 |
+
| [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | 8k | 2.96 | - | - | - | - |
|
| 45 |
+
| [Yarn-Mistral-7b-64k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-64k) | 64k | 3.04 | 2.65 | 2.44 | 2.20 | - |
|
| 46 |
+
| [Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k) | 128k | 3.08 | 2.68 | 2.47 | 2.24 | 2.19 |
|
| 47 |
+
|
| 48 |
+
Short context benchmarks showing that quality degradation is minimal:
|
| 49 |
+
| Model | Context Window | ARC-c | Hellaswag | MMLU | Truthful QA |
|
| 50 |
+
|-------|---------------:|------:|----------:|-----:|------------:|
|
| 51 |
+
| [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | 8k | 59.98 | 83.31 | 64.16 | 42.15 |
|
| 52 |
+
| [Yarn-Mistral-7b-64k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-64k) | 64k | 59.38 | 81.21 | 61.32 | 42.50 |
|
| 53 |
+
| [Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k) | 128k | 58.87 | 80.58 | 60.64 | 42.46 |
|
| 54 |
+
|
| 55 |
+
## Collaborators
|
| 56 |
+
|
| 57 |
+
- [bloc97](https://github.com/bloc97): Methods, paper and evals
|
| 58 |
+
- [@theemozilla](https://twitter.com/theemozilla): Methods, paper, model training, and evals
|
| 59 |
+
- [@EnricoShippole](https://twitter.com/EnricoShippole): Model training
|
| 60 |
+
- [honglu2875](https://github.com/honglu2875): Paper and evals
|
| 61 |
+
|
| 62 |
+
The authors would like to thank LAION AI for their support of compute for this model.
|
| 63 |
+
It was trained on the [JUWELS](https://www.fz-juelich.de/en/ias/jsc/systems/supercomputers/juwels) supercomputer.","{""id"": ""NousResearch/Yarn-Mistral-7b-128k"", ""author"": ""NousResearch"", ""sha"": ""d09f1f8ed437d61c1aff94c1beabee554843dcdd"", ""last_modified"": ""2023-11-02 20:01:56+00:00"", ""created_at"": ""2023-10-31 13:15:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10139, ""downloads_all_time"": null, ""likes"": 572, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""mistral"", ""text-generation"", ""custom_code"", ""en"", ""dataset:emozilla/yarn-train-tokenized-16k-mistral"", ""arxiv:2309.00071"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- emozilla/yarn-train-tokenized-16k-mistral\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- perplexity"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_mistral.MistralConfig"", ""AutoModelForCausalLM"": ""modeling_mistral_yarn.MistralForCausalLM""}, ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_mistral.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_mistral_yarn.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""Sarath0x8f/Document-QA-bot"", ""limcheekin/Yarn-Mistral-7B-128k-GGUF"", ""Darok/Featherless-Feud"", ""JDWebProgrammer/chatbot"", ""emekaboris/try-this-model"", ""realgenius/NousResearch-Yarn-Mistral-7b-128k"", ""ryn-85/NousResearch-Yarn-Mistral-7b-128k"", ""SC999/NV_Nemotron"", ""VKCYBER/NousResearch-Yarn-Mistral-7b-128k"", ""bradarrML/NousResearch-Yarn-Mistral-7b-128k"", ""PeepDaSlan9/NousResearch-Yarn-Mistral-7b-128k"", ""ziqin/NousResearch-Yarn-Mistral-7b-128k2"", ""kichen/NousResearch-Yarn-Mistral-7b-128k"", ""bhandsab/NousResearch-Yarn-Mistral-7b-128k"", ""TogetherAI/NousResearch-Yarn-Mistral-7b-128k"", ""Li46666/NousResearch-Yarn-Mistral-7b-128k"", ""bruc/NousResearch-Yarn-Mistral-7b-128k"", ""intelligenix/NousResearch-Yarn-Mistral-7b-128k"", 
""Bellamy66/NousResearch-Yarn-Mistral-7b-128k"", ""harmindersinghnijjar/NousResearch-Yarn-Mistral-7b-128k"", ""Hboris/NousResearch-Yarn-Mistral-7b-128k"", ""schogini/NousResearch-Yarn-Mistral-7b-128k"", ""schogini/test3"", ""hijaukuohno/NousResearch-Yarn-Mistral-7b-128k"", ""JacksonGa/NousResearch-Yarn-Mistral-7b-128k"", ""ziqin/NousResearch-Yarn-Mistral-7b-128k"", ""leckneck/NousResearch-Yarn-Mistral-7b-128k"", ""Risb0v/NousResearch-Yarn-Mistral-7b-128k"", ""JackHoltone/try-this-model"", ""neelumsoft/Document-QA-bot"", ""k11112/try-this-model"", ""sailokesh/Hello_GPT""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-02 20:01:56+00:00"", ""cardData"": ""datasets:\n- emozilla/yarn-train-tokenized-16k-mistral\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- perplexity"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6540fde232dcbb86631c0227"", ""modelId"": ""NousResearch/Yarn-Mistral-7b-128k"", ""usedStorage"": 28967555174}",0,"https://huggingface.co/dustydecapod/unraveled-7b-sft-lora, https://huggingface.co/dustydecapod/unraveled-7b-dpo-lora, https://huggingface.co/dustydecapod/unraveled-7b-a1, https://huggingface.co/Nitral-Archive/Kunocchini-1.2-7b-longtext-broken, https://huggingface.co/Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e",5,"https://huggingface.co/Yorth/ChatMistral128k, https://huggingface.co/xishanlone/Yarn-Mistral-7b-finetuned, https://huggingface.co/mrbmaryam/misteral-500, https://huggingface.co/mrbmaryam/misteral-100-logsummary, https://huggingface.co/codys12/Mistral-7b-Pathway-128k, https://huggingface.co/codys12/Mistral-7b-Pathway-128k-2, https://huggingface.co/codys12/Mistral-7b-Pathway-128k-4, https://huggingface.co/Shaleen123/mistrallite-medical-qa-2, https://huggingface.co/Shaleen123/nousmistral_128k_medical_qa, 
https://huggingface.co/Shaleen123/nousmistral_128k_medical_qa_full, https://huggingface.co/Shaleen123/nousmistral_128k_medical_qai, https://huggingface.co/Shaleen123/nousmistral_128k_medical_full, https://huggingface.co/openerotica/mistral-7b-lamia-v0.2, https://huggingface.co/eeeebbb2/4ce6d22d-2cc2-4626-9b60-d1f5d8f3141d, https://huggingface.co/dada22231/4ce6d22d-2cc2-4626-9b60-d1f5d8f3141d, https://huggingface.co/dada22231/19d6e509-260a-448f-9fe0-71b0dc32dfc2, https://huggingface.co/eeeebbb2/19d6e509-260a-448f-9fe0-71b0dc32dfc2, https://huggingface.co/eeeebbb2/1dea215e-69aa-4677-83a9-30354b142b14, https://huggingface.co/dada22231/1dea215e-69aa-4677-83a9-30354b142b14, https://huggingface.co/eeeebbb2/2c7c2cbb-c84e-4a7f-b44d-9eeba1a88832, https://huggingface.co/laquythang/2c7c2cbb-c84e-4a7f-b44d-9eeba1a88832, https://huggingface.co/dada22231/2c7c2cbb-c84e-4a7f-b44d-9eeba1a88832, https://huggingface.co/1-lock/c5aa696e-b261-4d7b-80b4-292d1a774c12, https://huggingface.co/dada22231/c5aa696e-b261-4d7b-80b4-292d1a774c12, https://huggingface.co/eeeebbb2/c5aa696e-b261-4d7b-80b4-292d1a774c12, https://huggingface.co/1-lock/cbb1f466-752c-485e-a59f-e9572b87753a, https://huggingface.co/dada22231/cbb1f466-752c-485e-a59f-e9572b87753a, https://huggingface.co/eeeebbb2/cbb1f466-752c-485e-a59f-e9572b87753a, https://huggingface.co/1-lock/0ffdac01-fb3b-4cff-a490-aee966862d58, https://huggingface.co/eeeebbb2/0ffdac01-fb3b-4cff-a490-aee966862d58, https://huggingface.co/dada22231/0ffdac01-fb3b-4cff-a490-aee966862d58, https://huggingface.co/DeepDream2045/0ffdac01-fb3b-4cff-a490-aee966862d58, https://huggingface.co/diaenra/3a0bf135-4483-4954-a86b-5ec161f4fd6f, https://huggingface.co/DeepDream2045/3a0bf135-4483-4954-a86b-5ec161f4fd6f, https://huggingface.co/vdos/3a0bf135-4483-4954-a86b-5ec161f4fd6f, https://huggingface.co/1-lock/3a0bf135-4483-4954-a86b-5ec161f4fd6f, https://huggingface.co/dada22231/3a0bf135-4483-4954-a86b-5ec161f4fd6f, 
https://huggingface.co/eeeebbb2/5eb46825-b54e-4c8a-bb73-60acbff95428, https://huggingface.co/DeepDream2045/5eb46825-b54e-4c8a-bb73-60acbff95428, https://huggingface.co/vdos/5eb46825-b54e-4c8a-bb73-60acbff95428, https://huggingface.co/Rodo-Sami/5eb46825-b54e-4c8a-bb73-60acbff95428, https://huggingface.co/bbytxt/5eb46825-b54e-4c8a-bb73-60acbff95428, https://huggingface.co/nblinh63/c1f32187-fe6d-48e7-8500-d8b23a1fa4a4, https://huggingface.co/nhung03/f99c37b8-5770-49ea-be1c-c88bacfc3cf5, https://huggingface.co/vdos/f99c37b8-5770-49ea-be1c-c88bacfc3cf5, https://huggingface.co/nblinh/33d3b6a7-3b08-4875-b054-69e951a0b698, https://huggingface.co/nhung03/33d3b6a7-3b08-4875-b054-69e951a0b698, https://huggingface.co/dzanbek/71075de7-094b-40a8-9370-70e796b03836, https://huggingface.co/dimasik87/7d340111-1a1d-4f5b-ba8c-3745be181c7f, https://huggingface.co/Alpha-Command-Intern/8760f2f0-9900-42e8-a6ae-77734c05cd96, https://huggingface.co/VERSIL91/f836eeb3-4606-4b4f-9640-a8bb76a06856, https://huggingface.co/VERSIL91/5d9c7aa1-6d86-4600-8206-d3b344c95963, https://huggingface.co/VERSIL91/8f458f01-1b18-4575-bb50-6412d63838e5, https://huggingface.co/VERSIL91/271de863-a7f6-4948-a696-68307f03bfbb, https://huggingface.co/fedovtt/b89a3fab-953d-446b-81e9-9adbdb1f2238, https://huggingface.co/VERSIL91/ee95b17f-8fbf-46c7-808f-51067c74af87, https://huggingface.co/VERSIL91/70aa3e8e-dc12-4654-9dc7-a8856ddd0717, https://huggingface.co/VERSIL91/e92b77ba-0846-48ad-859d-4709a193889d, https://huggingface.co/bbytxt/7f355202-7e2e-4737-9cbb-17d05cef0713, https://huggingface.co/dimasik2987/a463009c-ae0d-4a08-9a80-00926e3cfefb, https://huggingface.co/nttx/f85d5102-692f-438a-a95c-daad1d43366b, https://huggingface.co/VERSIL91/771936df-afb8-46d8-b492-6449a6e3bfb4, https://huggingface.co/cunghoctienganh/2d9bef26-5121-4a93-831b-759dc1222cfc, https://huggingface.co/nttx/cc501e30-6022-4b30-ac14-fbf2aca02b7e, https://huggingface.co/thaffggg/cc501e30-6022-4b30-ac14-fbf2aca02b7e, 
https://huggingface.co/VERSIL91/770e9b0b-c82a-42ab-b985-1cc3be79f64a, https://huggingface.co/tuanna08go/770e9b0b-c82a-42ab-b985-1cc3be79f64a, https://huggingface.co/VERSIL91/69f0e633-9233-41fa-82cd-62eef3c92b94, https://huggingface.co/chauhoang/69f0e633-9233-41fa-82cd-62eef3c92b94, https://huggingface.co/ivangrapher/4af1208a-ce50-4ba1-ac26-89ca6c48b25c, https://huggingface.co/lesso06/4af1208a-ce50-4ba1-ac26-89ca6c48b25c, https://huggingface.co/bbytxt/4af1208a-ce50-4ba1-ac26-89ca6c48b25c, https://huggingface.co/VERSIL91/4af1208a-ce50-4ba1-ac26-89ca6c48b25c, https://huggingface.co/tuanna08go/4bd9f0d7-ead0-4f41-a052-734fa308e920, https://huggingface.co/duyphu/82d20613-483f-75f6-7641-6f3a3dd42aea, https://huggingface.co/VERSIL91/4b28e435-fc17-4b53-9eec-37d06dcfda76, https://huggingface.co/havinash-ai/874f1b55-1b51-4b7e-a128-186d939f303a, https://huggingface.co/lesso03/233137a3-3739-4554-ab23-36f3aac99796, https://huggingface.co/lesso02/07166209-7a69-47ff-b17e-3ba8ba3c380f, https://huggingface.co/tuanna08go/bfb40f1e-a273-cc8f-3425-54157447d677, https://huggingface.co/VERSIL91/10077c3b-5443-4eea-a7ee-3d7091c6efa4, https://huggingface.co/hongngo/4b036ee2-1b8f-4ce0-b1e8-632c9d857caf, https://huggingface.co/havinash-ai/158e14a0-7af7-4b95-82e0-09a1890906b9, https://huggingface.co/tuanna08go/d997cf21-6327-8e8c-4dc8-5aa0556d28a7, https://huggingface.co/VERSIL91/f89c02e7-5cec-428a-81df-593399bb5292, https://huggingface.co/duyphu/06980322-b4ea-0d0b-14f1-d30f1b517b4b, https://huggingface.co/chauhoang/73f3fabd-9900-7f2f-a011-788b4a8a4e7a, https://huggingface.co/thangla01/bba44761-be3e-433f-ac89-abe7a58f02c8, https://huggingface.co/chauhoang/0d8825c1-4c2f-446d-b5d9-674eb35b6ab3, https://huggingface.co/nbninh/cebbdc47-52da-4a51-8767-fc8eee12c09f, https://huggingface.co/sergioalves/b97fadea-da8c-449c-9fba-a8fb645818db, https://huggingface.co/nhoxinh/a4ab1f22-ae51-47cd-8845-ba17cc673ba8, https://huggingface.co/nhung02/67274d88-1656-4507-bcb8-413b94593bfa, 
https://huggingface.co/havinash-ai/9364427f-10a1-42eb-a8fa-63bcc66f0d65, https://huggingface.co/prxy5606/ce496f76-5788-441a-8068-293ae481375e, https://huggingface.co/thakkkkkk/c71e0c77-e77b-43c3-92b4-8ff2929aac76, https://huggingface.co/lesso05/b69a9bc5-ff12-4cfb-a380-517001da1215, https://huggingface.co/lesso04/d573e283-70e5-43d2-b285-59c49a924313, https://huggingface.co/0x1202/54510b0f-9761-43c4-9538-84c9fd328f2e, https://huggingface.co/dzanbek/45aae6fc-ab5a-4388-ae18-f139ba4d0fe9, https://huggingface.co/demohong/ecf33ef3-1c6b-4c82-997d-8e562035dbed, https://huggingface.co/nblinh63/05013627-414d-4699-979a-3fbad8e0a362, https://huggingface.co/thalllsssss/5654d20c-136f-4866-b6f8-33b136c5d2f9, https://huggingface.co/lesso13/91e2307c-40ff-491d-8e2f-c7125442bff5, https://huggingface.co/chauhoang/f5cdacb6-c0c5-4333-835d-a56a34b48f64, https://huggingface.co/chauhoang/30ea8b1e-e86b-41be-8ead-3ad30f9185d5, https://huggingface.co/ivangrapher/193e6a11-46ee-4b3c-b06e-f0ef1875a14b, https://huggingface.co/nhung03/31f63669-8c6b-4f64-b745-17ecc3de4454, https://huggingface.co/cunghoctienganh/dcb93bcf-d036-4d7f-8491-0481b980afef, https://huggingface.co/nbninh/88576970-d870-4734-a57c-95be35433f55, https://huggingface.co/chauhoang/dcc8c1d1-bb81-dade-08a6-f71099da91bb, https://huggingface.co/sergioalves/cb5bc371-adb3-4162-99ac-bac83ab8fe30, https://huggingface.co/prxy5604/acde2aa1-da86-44e8-82ab-026ae06a22cf, https://huggingface.co/demohong/b862661f-6f25-4d75-b802-005b6aa58427, https://huggingface.co/cunghoctienganh/e11f4b17-0a59-4901-a211-d5bbcce62c24, https://huggingface.co/filipesantoscv11/f4683856-db91-457a-a0a0-66923a7d949a, https://huggingface.co/nblinh/4828ebde-6c91-46b5-ac7c-9ee9b64b373c, https://huggingface.co/hongngo/2c1c831e-fed3-4c54-846a-3d8125d3c839, https://huggingface.co/dzanbek/1faa732e-5ca7-433a-a23d-dbf0394d990f, https://huggingface.co/prxy5604/ed9ebf74-873e-490d-99f4-88ca5d9a4821, https://huggingface.co/tuanna08go/bacef684-33a2-4bef-b29c-ee533d381f26, 
https://huggingface.co/chauhoang/abad27d3-edac-4dfe-ab3e-c6e9f7bf5197, https://huggingface.co/kokovova/ad8ae14d-e729-43b7-9569-cf39f6b2386a, https://huggingface.co/mrHunghddddd/6f945f76-ca69-4f87-b4b6-67c6c73d6149, https://huggingface.co/thakkkkkk/a530cd42-41e2-47ed-a822-a9f184dcfe31, https://huggingface.co/datlaaaaaaa/47f518fa-0b53-44f0-985a-6b4f0da10500, https://huggingface.co/joboffer/042444c8-8bf7-4d09-8855-cc2a07443e8e, https://huggingface.co/thangla01/d24bad42-2d70-45ca-acab-1176699ab624, https://huggingface.co/kk-aivio/da92bc1f-5e73-4173-97f2-5c21135e2632, https://huggingface.co/duyphu/a6c251b1-a0d4-4526-a47c-b6b54b9bde22, https://huggingface.co/kokovova/cf99fa8b-d16f-4c15-a58a-bb723a8cda39, https://huggingface.co/nttx/298bb42d-4012-404d-9f8f-eb6048249fc1, https://huggingface.co/ajtaltarabukin2022/76ab40f5-5c46-4b75-a0bc-6b102d3fd437, https://huggingface.co/mrHunghddddd/d5009a75-3a93-4137-bc48-56e5bcf8a828, https://huggingface.co/trangtrannnnn/52892534-3c5f-40fd-89de-61e4c746cbaa, https://huggingface.co/mrHungddddh/1d8f7a65-db6b-4220-b38e-0a0fac876d13, https://huggingface.co/nhung03/9ff30e73-0e2b-46da-b2b4-318b02ff0406, https://huggingface.co/nbninh/f943b7fe-faab-4d30-8358-95ba05f5705c, https://huggingface.co/sergioalves/d5c49a01-bc31-40d5-93dd-c43c3227769e, https://huggingface.co/aleegis10/3ce4e187-e2fd-4d0b-a953-39d335ed509f, https://huggingface.co/Aivesa/3ac905c6-b56d-4d35-810a-855512cee15e, https://huggingface.co/thalllsssss/992804ea-dc84-41df-95fb-a4a652d54e24, https://huggingface.co/cunghoctienganh/22f11baf-fab2-497a-9b32-c23e68bb6fdf, https://huggingface.co/nhunglaaaaaaa/6a00ab3f-6749-41f9-97a2-689f35710cfd, https://huggingface.co/nhung03/76525ae0-d4de-48eb-b830-4aa2c9b98537, https://huggingface.co/vertings6/0a301c11-8510-4a89-aaa5-9997e278d52a, https://huggingface.co/nhung01/09b12016-4de8-415e-b542-23da6221d45e, https://huggingface.co/lesso01/fde5de22-97a5-4bfe-9a01-aec6b3c73899, 
https://huggingface.co/ClarenceDan/ee762543-2342-4a42-bc24-b76bf89d72f6, https://huggingface.co/nttx/9652d64d-a000-4fc3-8d60-f8c656a3850e, https://huggingface.co/0x1202/38773a2e-6cf0-49f4-8cf4-6e34bf6e0a44, https://huggingface.co/0x1202/0b652084-976b-4171-9147-545c4823b511, https://huggingface.co/aleegis10/29e624ab-bd2f-4fd1-8cda-b0607541dd27, https://huggingface.co/prxy5608/b5f89c95-3ff8-4e68-bbfc-92eb3f9d916c, https://huggingface.co/kokovova/18f82232-bedc-4aea-8ee3-0f3a38a70fc3, https://huggingface.co/mrHunghddddd/7f550d89-48b9-4b61-a234-3aea758c63c4, https://huggingface.co/trangtrannnnn/0df8fdfd-bb45-4ec3-8dce-ca012b1fcb38, https://huggingface.co/vermoney/edfc823e-4960-402a-9d15-b2334cc9c541, https://huggingface.co/dimasik2987/4b4a07fe-8d4b-42bc-9da2-94b82ef498f9, https://huggingface.co/lesso04/2a35ee11-e5b5-4b0b-b08d-168779525e1c, https://huggingface.co/thakkkkkk/0ac3a980-9432-4c64-9805-82d82405de05, https://huggingface.co/myhaaaaaaa/89c7dba5-b650-46d4-8cc5-62fc8d80cd18, https://huggingface.co/nhung02/3fb51f48-e6e8-47c6-b330-29ec61639cd1, https://huggingface.co/tuanna08go/fd51f0b4-3619-4421-86a8-479621440cb0, https://huggingface.co/lesso17/51831be6-c9e4-4d01-bf19-ffe768726467, https://huggingface.co/nhunglaaaaaaa/aedd1742-0ec5-4d24-bc8b-3cb237ec53bb, https://huggingface.co/prxy5605/4234cbbc-7d58-41fc-9dcd-11e61cfe0c16, https://huggingface.co/mrHunghddddd/0bfed24f-0502-4868-910d-8d1c3d81c4f7, https://huggingface.co/hongngo/cf7ca3f6-c50e-426c-a6d2-c8851b7f7904, https://huggingface.co/laquythang/80211eee-f0bb-4b5d-abf2-edad88a75ab3, https://huggingface.co/tuanna08go/11aa8c68-e283-4ef6-a298-be9fd84ecdf7, https://huggingface.co/nhunglaaaaaaa/6989f3aa-2b1e-49b1-acc7-40795eb271e7, https://huggingface.co/lesso09/344aae17-8f2a-4d84-8922-406e07dd82bf, https://huggingface.co/lesso06/f9c54df4-fcfe-4f01-b216-59537b091b8c, https://huggingface.co/daniel40/0a08a1f4-9596-47ab-be0d-a8a5ba12cde8, https://huggingface.co/mrhunghd/41c28e2c-0cff-4303-957b-f763c2764195, 
https://huggingface.co/thalllsssss/11624806-f966-42dd-ac76-209541255c05, https://huggingface.co/prxy5606/b1ba74b4-b098-4fba-8013-414a2ec3deb2, https://huggingface.co/duyphu/b373ad09-9cab-4a56-b398-870664f5384d, https://huggingface.co/lesso16/8a6496a8-9041-433a-8697-e9ab66b8c9f3, https://huggingface.co/kokovova/ec62609a-b24a-43ce-90f5-8bb74c81b826, https://huggingface.co/kk-aivio/dd104cd7-7b24-4f93-884f-3e516660a1ec, https://huggingface.co/chauhoang/dd722a00-e3df-423b-b84f-9d9d41fd03b5, https://huggingface.co/vmpsergio/444ea8e2-879b-4a96-a165-93a22dadcfe1, https://huggingface.co/aleegis09/a1f0bf08-c29a-41bc-9220-affbd0c4cadd, https://huggingface.co/robiual-awal/38659625-f1ff-44a5-ac8a-90bc2c08a907, https://huggingface.co/lesso15/bd4ba09f-3628-4741-93b3-06a475e3758d, https://huggingface.co/JacksonBrune/db47e4c0-cec7-46c8-9e99-114552080598, https://huggingface.co/prxy5608/2d62c95b-b5b7-4cff-b313-4fd6817fc10c, https://huggingface.co/sergioalves/591bc345-d926-4863-ba0d-fa3094334212, https://huggingface.co/infogeo/c52aef5b-e8e5-4af4-b6e7-9b623ff9403c, https://huggingface.co/robiual-awal/8fecbd4a-3cb4-4e10-96da-54e457c7a8f9, https://huggingface.co/daniel40/e9c7a482-c721-4a32-b27c-dfe64225e9b5, https://huggingface.co/prxy5605/981de68c-8e87-473e-962f-56110deecee5, https://huggingface.co/fedovtt/daebeeb1-e9d0-45fa-9a40-ef360ed3b699, https://huggingface.co/ClarenceDan/8e6e00e7-e5f2-4df3-856c-be0c4995ceef, https://huggingface.co/ClarenceDan/ef40f1bf-ad6e-4b5c-a188-7f8c76d6991e, https://huggingface.co/tuanna08go/cdefd8e3-48f1-4dc3-8bb7-3d8126ab5c3e, https://huggingface.co/duyphu/3207f062-dabf-4c6d-a787-260a00d67134, https://huggingface.co/cvoffer/46e8d4c9-682e-4a6f-b008-d8c016d6061c, https://huggingface.co/dimasik1987/c0f2fe3c-8d6d-4391-bb5c-13d8dbd23854, https://huggingface.co/lesso16/3403cb0a-afbb-4fed-93b7-cc950245ec9c, https://huggingface.co/JacksonBrune/4ee7ac79-f226-4d84-b38a-fc4495cdfb15, https://huggingface.co/dimasik1987/17b0a914-a867-4fdf-9f23-b30b63d5c5be, 
https://huggingface.co/kk-aivio/a31317fd-eda1-45ac-a021-4a5740c0e1e5, https://huggingface.co/prxy5604/f993da58-3631-4086-a986-0c08510aae7b, https://huggingface.co/chauhoang/0536300e-fd95-4a8a-ab92-7fa76ab0ae5b, https://huggingface.co/prxy5605/5a560af8-c547-417a-99ea-eccfb62a7d4b, https://huggingface.co/prxy5607/bcc24a39-0c77-4a8a-be1e-236284cf42e5, https://huggingface.co/aleegis10/c92f3cbc-704a-43bf-a1cf-4c08533fa0cd, https://huggingface.co/prxy5608/ea79ccf6-48bf-4045-9040-448baaebc5bd, https://huggingface.co/prxy5604/5e431d85-fe97-4246-a462-763743f55e83, https://huggingface.co/ivangrapher/11031ac8-8d03-437a-9853-b63de2765100, https://huggingface.co/lesso11/883bf879-f6bc-4098-83a4-ca1597cb56df, https://huggingface.co/great0001/5c60bc32-dfd6-47b4-a126-90849066cc87, https://huggingface.co/great0001/28bc450b-afdc-47c8-b615-650b703d91d2, https://huggingface.co/duyphu/02428a4b-60ec-4f62-ba81-1c46f99962ea, https://huggingface.co/lesso08/9769c5df-7858-437e-bb65-b4b3686ad21a, https://huggingface.co/robiulawaldev/fc68744a-dc32-404d-8547-8ad21a5eacd5, https://huggingface.co/daniel40/e2f1684c-464c-4dee-b698-08ba6243be57, https://huggingface.co/havinash-ai/de5599dd-765d-4873-8bc1-ab7b67b4d954, https://huggingface.co/lesso09/58bf2eef-085b-4529-be7d-4ab58745fecc, https://huggingface.co/lesso01/25241cfe-2462-42c6-803f-93306b9ae111, https://huggingface.co/shibajustfor/a690348b-9d15-46c5-92f0-37a6633c8cd5, https://huggingface.co/lesso05/3d6c2bef-5196-4e10-a18c-e8a671e5592b, https://huggingface.co/havinash-ai/04d3c6eb-b8cd-4219-be2d-b4d5b8f18e47, https://huggingface.co/prxy5604/718a3229-73ba-4980-9425-ffb241853776, https://huggingface.co/lesso17/508f660c-12f4-4089-9dde-fe35f712951b, https://huggingface.co/shibajustfor/af3c529b-0bda-4a87-a3aa-f1f8497ef553, https://huggingface.co/lesso12/88801676-1167-4bba-9701-ae4ea2f02008, https://huggingface.co/robiulawaldev/c2b9832c-7f1f-49f7-8593-e0b85a1f902f, https://huggingface.co/adammandic87/cd1c7f2c-1a8d-40d0-9d77-30d633a1c5a2, 
https://huggingface.co/baby-dev/26612dc7-1f71-4054-8593-4d0debb48528, https://huggingface.co/daniel40/75886df6-0dee-44cf-a2a1-b5197d0b55ef, https://huggingface.co/baby-dev/ea97ea81-4e0b-45ba-9258-976a240c75a7, https://huggingface.co/baby-dev/ee9e4270-6e49-4849-b82c-e093f3f8f91e, https://huggingface.co/prxy5604/3f62ddfe-6a37-4444-b41d-01f4dd20cb8b, https://huggingface.co/baby-dev/06260df1-8d5f-4438-abfa-b35ff90eb2f3, https://huggingface.co/cilooor/2cb730ae-3a65-489a-b4eb-5f6a3e363c56, https://huggingface.co/lesso01/a72da487-f41d-4f09-a703-5bab3ad6c5ea, https://huggingface.co/robiulawaldev/f51fc536-ec44-4ee6-86aa-63f55f95a32d, https://huggingface.co/Best000/d6ef5ef4-583d-4099-94b0-9e06ea8ebd83, https://huggingface.co/lesso10/38cac83b-96f1-4d90-b4d5-c34c58ba5cfd, https://huggingface.co/havinash-ai/9b797226-7145-4136-93e8-a54e4d40f964, https://huggingface.co/robiulawaldev/5917997a-3b70-4b3d-9960-6615b0d995f9, https://huggingface.co/lesso17/6b390319-bab5-44be-943f-aa0dc3786961, https://huggingface.co/aleegis12/35493a02-5126-4b7f-91b7-d8871a341d0e, https://huggingface.co/lesso02/fe6121c5-10ea-4fed-b951-b5de3475ed1e, https://huggingface.co/aleegis12/58c6f737-4f6f-4799-93b0-b0649e76c322, https://huggingface.co/robiulawaldev/9970cf47-6dce-400b-a000-44ff1cccb491, https://huggingface.co/adammandic87/21cb04a5-3d13-4602-9789-69b16ae1e600, https://huggingface.co/robiual-awal/49d8bce3-7595-4775-abe3-3e5bad9cca94, https://huggingface.co/Best000/c35e532b-b7af-4878-bc4d-5e040418cbd5, https://huggingface.co/daniel40/96011ee3-48dc-4faf-bdd5-e3d9dfbf34f6, https://huggingface.co/adammandic87/b6839351-6117-4fc4-a71b-36e4e7f939cf, https://huggingface.co/robiulawaldev/515a1e0b-f80b-4231-b5ce-9676ac23b041, https://huggingface.co/shibajustfor/9f972555-a6f1-4612-a66e-32411fddee31, https://huggingface.co/havinash-ai/24827954-6ddd-4643-be09-7a4274d99af4, https://huggingface.co/robiulawaldev/b9dc2b35-928b-46aa-b46a-314a2b196a96, 
https://huggingface.co/adammandic87/eb6979e6-fa66-43a1-a422-43d348a52bab, https://huggingface.co/havinash-ai/957e24e1-c430-480c-9fc7-fd05fc81db31, https://huggingface.co/lesso/70f5ea81-56a7-4e34-9650-4bf1396a028e, https://huggingface.co/daniel40/f58047a9-8efb-45ce-a110-788eee8ae7cb, https://huggingface.co/lesso/11d552f9-f055-4a51-acfd-23e825d55757, https://huggingface.co/lesso/b55601b3-6885-44f3-aef3-85f4a63cdfae, https://huggingface.co/lesso/8e1c0543-8bf8-4de3-aedb-cd41f2e1f01e, https://huggingface.co/daniel40/808e0612-0f02-4f38-9adf-4549212f9fb4, https://huggingface.co/lesso/ba4b6b81-b218-46c9-a712-bf5912442432, https://huggingface.co/lesso/13d81c38-3ccf-4cd0-a680-62f3c8bf3d71, https://huggingface.co/cilooor/4fea0006-2355-4d84-8016-6fe7357801f3, https://huggingface.co/Best000/951b3595-639b-44af-b0b4-dfb8eef72989, https://huggingface.co/robiulawaldev/88374eba-2d62-47f8-9f63-2657719284eb, https://huggingface.co/shibajustfor/f64a629b-925b-4d9a-b89d-c755f66f7127, https://huggingface.co/robiulawaldev/c9932886-e440-4e7b-8f17-afe72cc16b10, https://huggingface.co/daniel40/e92e73c2-4192-479d-8dd6-08df12f89aa5, https://huggingface.co/lesso/3b9dd9df-231d-4f0c-8f91-5dcc06b68653, https://huggingface.co/abaddon182/d98b7989-907f-4509-beca-ea59427efec9, https://huggingface.co/cimol/37d9ad71-75ff-42f3-95ae-10ece10c325c, https://huggingface.co/mrferr3t/91571574-8b09-48f0-91a1-30ef8b351462, https://huggingface.co/mrferr3t/0638f241-005f-4e9c-83d7-3190ffb2520f, https://huggingface.co/lesso09/68ed5539-cf7d-48b5-99ca-180cd43d546b, https://huggingface.co/daniel40/0e9bdb7f-9568-4961-95bf-82f3481df390, https://huggingface.co/daniel40/adf8f84c-6b4d-4b4c-9321-42ba4bdea166, https://huggingface.co/lesso03/57495c52-6fe4-4495-96c8-d66a7c6b2f20, https://huggingface.co/lesso04/13f27876-66d6-4a00-a75e-419b37426b93, https://huggingface.co/tuantmdev/685940b0-6137-4dea-873e-f54936302b99, https://huggingface.co/Romain-XV/8ce2d2b2-df8e-49c2-991a-52d0e53001e8, 
https://huggingface.co/lesso10/5061a162-8467-4be0-85b7-d25a59052c89, https://huggingface.co/lesso02/0ee4622d-03da-45da-9673-593d4b80d202, https://huggingface.co/daniel40/d71796aa-aad8-430b-a505-3e744e207c58, https://huggingface.co/lesso16/42634c65-8919-4860-a2f5-59ee089a7350, https://huggingface.co/lesso12/9e468510-d87b-40a1-98d2-bd8a4ae92c3f, https://huggingface.co/lesso07/879c4381-fa71-48d1-b161-9dc77df0e5f8, https://huggingface.co/lesso17/4d6e571b-31ed-40e3-8bcb-39534d93d9eb, https://huggingface.co/lesso06/635f8758-9726-4773-a4aa-fc6c65d5c541, https://huggingface.co/lesso04/6b06da8d-889f-4c51-837d-35f633f20b24, https://huggingface.co/tuantmdev/23168cb6-d6ed-450f-ad61-e2d41152acb0, https://huggingface.co/trenden/0a5f8c35-a43b-45cf-9635-e3eb16b06155, https://huggingface.co/shibajustfor/5a67588d-8c19-4508-a315-f804ea005cef, https://huggingface.co/romainnn/0291b31e-c29d-4447-9085-6c8c73860149, https://huggingface.co/samoline/ea23503c-3030-4bf2-a6e0-3dc60e740ec7, https://huggingface.co/lesso13/25a03dfb-e1a7-4cc7-b1be-b2abfde71185, https://huggingface.co/Romain-XV/ff1365cc-4661-4224-a4cd-7d7f5781fc86, https://huggingface.co/lesso12/6035f9dc-356b-4106-8ff6-29e39af82047, https://huggingface.co/lesso03/343ddc57-13fe-47f2-b2e5-e07e22b97afa, https://huggingface.co/Alphatao/45822166-6d97-4b65-bb48-c9b6067f220d, https://huggingface.co/lesso11/39b4169b-a411-43ff-864d-3a134ba5fcf1, https://huggingface.co/lesso13/8ed84472-3d75-4840-a2de-c7b8c763aafb, https://huggingface.co/lesso04/07236733-cf62-42cb-9f6a-f5b38e681f6f, https://huggingface.co/lesso07/8e2ab3db-3c60-4c9d-935c-3f8e0ab7c154, https://huggingface.co/fedovtt/748e72ab-d705-4782-9859-b4740c95eeed, https://huggingface.co/dzanbek/cb751569-1861-407f-945e-06a115915807, https://huggingface.co/vmpsergio/3aac1da8-4740-4e80-af8a-c438a4677455",313,"https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-AWQ, https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF, https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GPTQ, 
https://huggingface.co/Lewdiculous/Kunocchini-1.2-7b-longtext-GGUF-Imatrix, https://huggingface.co/mradermacher/Yarn-Mistral-7b-128k-GGUF, https://huggingface.co/mradermacher/Yarn-Mistral-7b-128k-i1-GGUF, https://huggingface.co/PrunaAI/NousResearch-Yarn-Mistral-7b-128k-GGUF-smashed",7,"https://huggingface.co/Aryanne/YarnLake-Swap-7B, https://huggingface.co/InnerI/InnerILLM-OpenPipe-Nous-Yarn-Mistral-optimized-1228-7B-slerp, https://huggingface.co/InnerI/InnerILLM-0x00d0-7B-slerp, https://huggingface.co/bartowski/Yarncules-7b-128k-exl2, https://huggingface.co/seyf1elislam/WestKunai-Hermes-long-128k-test-7b, https://huggingface.co/mvpmaster/openchat-3.5-0106-128k-DPO-fixed-32000, https://huggingface.co/Kukedlc/NeuralContext-7b-v1, https://huggingface.co/Kukedlc/NeuralContext-7b-v2, https://huggingface.co/ehristoforu/0001, https://huggingface.co/ehristoforu/Gistral-16B, https://huggingface.co/ehristoforu/Gistral-16B-Q4_K_M-GGUF, https://huggingface.co/ehristoforu/0000mxs, https://huggingface.co/ehristoforu/flm-m01, https://huggingface.co/ehristoforu/pm-v0.1, https://huggingface.co/ehristoforu/pm-v0.2",15,"Darok/Featherless-Feud, JDWebProgrammer/chatbot, PeepDaSlan9/NousResearch-Yarn-Mistral-7b-128k, SC999/NV_Nemotron, Sarath0x8f/Document-QA-bot, VKCYBER/NousResearch-Yarn-Mistral-7b-128k, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=NousResearch/Yarn-Mistral-7b-128k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNousResearch%2FYarn-Mistral-7b-128k%5D(%2FNousResearch%2FYarn-Mistral-7b-128k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, limcheekin/Yarn-Mistral-7B-128k-GGUF, neelumsoft/Document-QA-bot, realgenius/NousResearch-Yarn-Mistral-7b-128k, ryn-85/NousResearch-Yarn-Mistral-7b-128k",13
|
| 64 |
+
dustydecapod/unraveled-7b-sft-lora,"---
|
| 65 |
+
license: apache-2.0
|
| 66 |
+
base_model: NousResearch/Yarn-Mistral-7b-128k
|
| 67 |
+
tags:
|
| 68 |
+
- generated_from_trainer
|
| 69 |
+
model-index:
|
| 70 |
+
- name: unraveled-7b-sft-lora
|
| 71 |
+
results: []
|
| 72 |
+
---
|
| 73 |
+
|
| 74 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 75 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 76 |
+
|
| 77 |
+
# unraveled-7b-sft-lora
|
| 78 |
+
|
| 79 |
+
This model is a fine-tuned version of [NousResearch/Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k) on an unknown dataset.
|
| 80 |
+
It achieves the following results on the evaluation set:
|
| 81 |
+
- Loss: 1.0261
|
| 82 |
+
|
| 83 |
+
## Model description
|
| 84 |
+
|
| 85 |
+
More information needed
|
| 86 |
+
|
| 87 |
+
## Intended uses & limitations
|
| 88 |
+
|
| 89 |
+
More information needed
|
| 90 |
+
|
| 91 |
+
## Training and evaluation data
|
| 92 |
+
|
| 93 |
+
More information needed
|
| 94 |
+
|
| 95 |
+
## Training procedure
|
| 96 |
+
|
| 97 |
+
### Training hyperparameters
|
| 98 |
+
|
| 99 |
+
The following hyperparameters were used during training:
|
| 100 |
+
- learning_rate: 2e-05
|
| 101 |
+
- train_batch_size: 4
|
| 102 |
+
- eval_batch_size: 8
|
| 103 |
+
- seed: 42
|
| 104 |
+
- distributed_type: multi-GPU
|
| 105 |
+
- num_devices: 4
|
| 106 |
+
- gradient_accumulation_steps: 128
|
| 107 |
+
- total_train_batch_size: 2048
|
| 108 |
+
- total_eval_batch_size: 32
|
| 109 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 110 |
+
- lr_scheduler_type: cosine
|
| 111 |
+
- num_epochs: 1
|
| 112 |
+
|
| 113 |
+
### Training results
|
| 114 |
+
|
| 115 |
+
| Training Loss | Epoch | Step | Validation Loss |
|
| 116 |
+
|:-------------:|:-----:|:----:|:---------------:|
|
| 117 |
+
| 1.0242 | 0.67 | 68 | 1.0262 |
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
### Framework versions
|
| 121 |
+
|
| 122 |
+
- Transformers 4.35.0
|
| 123 |
+
- Pytorch 2.1.0+cu118
|
| 124 |
+
- Datasets 2.14.6
|
| 125 |
+
- Tokenizers 0.14.1
|
| 126 |
+
","{""id"": ""dustydecapod/unraveled-7b-sft-lora"", ""author"": ""dustydecapod"", ""sha"": ""425fbe42c81e962df3cbacb794e5fcb5f3253f89"", ""last_modified"": ""2023-11-22 09:14:14+00:00"", ""created_at"": ""2023-11-21 09:53:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""mistral"", ""text-generation"", ""generated_from_trainer"", ""conversational"", ""custom_code"", ""base_model:NousResearch/Yarn-Mistral-7b-128k"", ""base_model:finetune:NousResearch/Yarn-Mistral-7b-128k"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-sft-lora\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""unraveled-7b-sft-lora"", ""results"": []}], ""config"": {""architectures"": [""MistralForCausalLM""], ""auto_map"": {""AutoConfig"": ""NousResearch/Yarn-Mistral-7b-128k--configuration_mistral.MistralConfig"", ""AutoModelForCausalLM"": ""NousResearch/Yarn-Mistral-7b-128k--modeling_mistral_yarn.MistralForCausalLM""}, ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] 
+ eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov22_06-20-04_401fa5a8015d/events.out.tfevents.1700634336.401fa5a8015d.8816.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov22_06-30-36_401fa5a8015d/events.out.tfevents.1700634659.401fa5a8015d.9073.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov22_06-30-36_401fa5a8015d/events.out.tfevents.1700644438.401fa5a8015d.9073.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-22 09:14:14+00:00"", ""cardData"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-sft-lora\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""655c7e1b2d1922b228a382ea"", ""modelId"": ""dustydecapod/unraveled-7b-sft-lora"", ""usedStorage"": 218155852}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=dustydecapod/unraveled-7b-sft-lora&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdustydecapod%2Funraveled-7b-sft-lora%5D(%2Fdustydecapod%2Funraveled-7b-sft-lora)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 127 |
+
dustydecapod/unraveled-7b-dpo-lora,"---
|
| 128 |
+
license: apache-2.0
|
| 129 |
+
base_model: NousResearch/Yarn-Mistral-7b-128k
|
| 130 |
+
tags:
|
| 131 |
+
- generated_from_trainer
|
| 132 |
+
model-index:
|
| 133 |
+
- name: unraveled-7b-dpo-lora
|
| 134 |
+
results: []
|
| 135 |
+
---
|
| 136 |
+
|
| 137 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 138 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 139 |
+
|
| 140 |
+
# unraveled-7b-dpo-lora
|
| 141 |
+
|
| 142 |
+
This model is a fine-tuned version of [NousResearch/Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k) on the None dataset.
|
| 143 |
+
It achieves the following results on the evaluation set:
|
| 144 |
+
- Loss: 0.5895
|
| 145 |
+
- Rewards/chosen: 0.1439
|
| 146 |
+
- Rewards/rejected: -0.1833
|
| 147 |
+
- Rewards/accuracies: 0.6880
|
| 148 |
+
- Rewards/margins: 0.3272
|
| 149 |
+
- Logps/rejected: -221.8329
|
| 150 |
+
- Logps/chosen: -266.1414
|
| 151 |
+
- Logits/rejected: -1.9675
|
| 152 |
+
- Logits/chosen: -2.0859
|
| 153 |
+
|
| 154 |
+
## Model description
|
| 155 |
+
|
| 156 |
+
More information needed
|
| 157 |
+
|
| 158 |
+
## Intended uses & limitations
|
| 159 |
+
|
| 160 |
+
More information needed
|
| 161 |
+
|
| 162 |
+
## Training and evaluation data
|
| 163 |
+
|
| 164 |
+
More information needed
|
| 165 |
+
|
| 166 |
+
## Training procedure
|
| 167 |
+
|
| 168 |
+
### Training hyperparameters
|
| 169 |
+
|
| 170 |
+
The following hyperparameters were used during training:
|
| 171 |
+
- learning_rate: 5e-07
|
| 172 |
+
- train_batch_size: 2
|
| 173 |
+
- eval_batch_size: 4
|
| 174 |
+
- seed: 42
|
| 175 |
+
- distributed_type: multi-GPU
|
| 176 |
+
- num_devices: 4
|
| 177 |
+
- gradient_accumulation_steps: 32
|
| 178 |
+
- total_train_batch_size: 256
|
| 179 |
+
- total_eval_batch_size: 16
|
| 180 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 181 |
+
- lr_scheduler_type: linear
|
| 182 |
+
- lr_scheduler_warmup_ratio: 0.1
|
| 183 |
+
- num_epochs: 3
|
| 184 |
+
|
| 185 |
+
### Training results
|
| 186 |
+
|
| 187 |
+
| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|
| 188 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
|
| 189 |
+
| 0.6313 | 1.0 | 242 | 0.6318 | 0.1228 | -0.0304 | 0.6600 | 0.1532 | -220.3036 | -266.3521 | -1.9863 | -2.1062 |
|
| 190 |
+
| 0.6013 | 2.0 | 484 | 0.5983 | 0.1484 | -0.1334 | 0.6760 | 0.2819 | -221.3338 | -266.0959 | -1.9723 | -2.0914 |
|
| 191 |
+
| 0.5889 | 3.0 | 726 | 0.5895 | 0.1439 | -0.1833 | 0.6880 | 0.3272 | -221.8329 | -266.1414 | -1.9675 | -2.0859 |
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
### Framework versions
|
| 195 |
+
|
| 196 |
+
- Transformers 4.35.0
|
| 197 |
+
- Pytorch 2.1.0+cu118
|
| 198 |
+
- Datasets 2.14.6
|
| 199 |
+
- Tokenizers 0.14.1
|
| 200 |
+
","{""id"": ""dustydecapod/unraveled-7b-dpo-lora"", ""author"": ""dustydecapod"", ""sha"": ""0463ac7751cac7a16d4d22de6faa04c48ded3fb8"", ""last_modified"": ""2023-11-22 15:14:30+00:00"", ""created_at"": ""2023-11-22 09:31:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""mistral"", ""text-generation"", ""generated_from_trainer"", ""conversational"", ""custom_code"", ""base_model:NousResearch/Yarn-Mistral-7b-128k"", ""base_model:finetune:NousResearch/Yarn-Mistral-7b-128k"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-dpo-lora\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""unraveled-7b-dpo-lora"", ""results"": []}], ""config"": {""architectures"": [""MistralForCausalLM""], ""auto_map"": {""AutoConfig"": ""NousResearch/Yarn-Mistral-7b-128k--configuration_mistral.MistralConfig"", ""AutoModelForCausalLM"": ""NousResearch/Yarn-Mistral-7b-128k--modeling_mistral_yarn.MistralForCausalLM""}, ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] 
+ eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov22_09-29-27_401fa5a8015d/events.out.tfevents.1700645508.401fa5a8015d.9788.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov22_09-29-27_401fa5a8015d/events.out.tfevents.1700666050.401fa5a8015d.9788.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, 
""lastModified"": ""2023-11-22 15:14:30+00:00"", ""cardData"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-dpo-lora\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""655dca7e68c6dd1321499779"", ""modelId"": ""dustydecapod/unraveled-7b-dpo-lora"", ""usedStorage"": 218197580}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=dustydecapod/unraveled-7b-dpo-lora&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdustydecapod%2Funraveled-7b-dpo-lora%5D(%2Fdustydecapod%2Funraveled-7b-dpo-lora)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 201 |
+
dustydecapod/unraveled-7b-a1,"---
|
| 202 |
+
license: apache-2.0
|
| 203 |
+
base_model: NousResearch/Yarn-Mistral-7b-128k
|
| 204 |
+
tags:
|
| 205 |
+
- generated_from_trainer
|
| 206 |
+
model-index:
|
| 207 |
+
- name: unraveled-7b-dpo-lora
|
| 208 |
+
results: []
|
| 209 |
+
---
|
| 210 |
+
|
| 211 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 212 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 213 |
+
|
| 214 |
+
# unraveled-7b-dpo-lora
|
| 215 |
+
|
| 216 |
+
This model is a fine-tuned version of [NousResearch/Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k), following the Zephyr alignment protocol.
|
| 217 |
+
It achieves the following results on the evaluation set:
|
| 218 |
+
- Loss: 0.5895
|
| 219 |
+
- Rewards/chosen: 0.1439
|
| 220 |
+
- Rewards/rejected: -0.1833
|
| 221 |
+
- Rewards/accuracies: 0.6880
|
| 222 |
+
- Rewards/margins: 0.3272
|
| 223 |
+
- Logps/rejected: -221.8329
|
| 224 |
+
- Logps/chosen: -266.1414
|
| 225 |
+
- Logits/rejected: -1.9675
|
| 226 |
+
- Logits/chosen: -2.0859
|
| 227 |
+
|
| 228 |
+
## Model description
|
| 229 |
+
|
| 230 |
+
More information needed
|
| 231 |
+
|
| 232 |
+
## Intended uses & limitations
|
| 233 |
+
|
| 234 |
+
More information needed
|
| 235 |
+
|
| 236 |
+
## Training and evaluation data
|
| 237 |
+
|
| 238 |
+
More information needed
|
| 239 |
+
|
| 240 |
+
## Training procedure
|
| 241 |
+
|
| 242 |
+
### Training hyperparameters
|
| 243 |
+
|
| 244 |
+
The following hyperparameters were used during training:
|
| 245 |
+
- learning_rate: 5e-07
|
| 246 |
+
- train_batch_size: 2
|
| 247 |
+
- eval_batch_size: 4
|
| 248 |
+
- seed: 42
|
| 249 |
+
- distributed_type: multi-GPU
|
| 250 |
+
- num_devices: 4
|
| 251 |
+
- gradient_accumulation_steps: 32
|
| 252 |
+
- total_train_batch_size: 256
|
| 253 |
+
- total_eval_batch_size: 16
|
| 254 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 255 |
+
- lr_scheduler_type: linear
|
| 256 |
+
- lr_scheduler_warmup_ratio: 0.1
|
| 257 |
+
- num_epochs: 3
|
| 258 |
+
|
| 259 |
+
### Training results
|
| 260 |
+
|
| 261 |
+
| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|
| 262 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
|
| 263 |
+
| 0.6313 | 1.0 | 242 | 0.6318 | 0.1228 | -0.0304 | 0.6600 | 0.1532 | -220.3036 | -266.3521 | -1.9863 | -2.1062 |
|
| 264 |
+
| 0.6013 | 2.0 | 484 | 0.5983 | 0.1484 | -0.1334 | 0.6760 | 0.2819 | -221.3338 | -266.0959 | -1.9723 | -2.0914 |
|
| 265 |
+
| 0.5889 | 3.0 | 726 | 0.5895 | 0.1439 | -0.1833 | 0.6880 | 0.3272 | -221.8329 | -266.1414 | -1.9675 | -2.0859 |
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
### Framework versions
|
| 269 |
+
|
| 270 |
+
- Transformers 4.35.0
|
| 271 |
+
- Pytorch 2.1.0+cu118
|
| 272 |
+
- Datasets 2.14.6
|
| 273 |
+
- Tokenizers 0.14.1
|
| 274 |
+
","{""id"": ""dustydecapod/unraveled-7b-a1"", ""author"": ""dustydecapod"", ""sha"": ""fac05775fa8121b58cda8031b7001323bd43983d"", ""last_modified"": ""2023-11-22 17:41:31+00:00"", ""created_at"": ""2023-11-22 15:25:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 72, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""generated_from_trainer"", ""custom_code"", ""base_model:NousResearch/Yarn-Mistral-7b-128k"", ""base_model:finetune:NousResearch/Yarn-Mistral-7b-128k"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-dpo-lora\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""unraveled-7b-dpo-lora"", ""results"": []}], ""config"": {""architectures"": [""MistralForCausalLM""], ""auto_map"": {""AutoConfig"": ""NousResearch/Yarn-Mistral-7b-128k--configuration_mistral.MistralConfig"", ""AutoModelForCausalLM"": ""NousResearch/Yarn-Mistral-7b-128k--modeling_mistral_yarn.MistralForCausalLM""}, ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, 
""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 7241732096}, ""total"": 7241732096}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-22 17:41:31+00:00"", ""cardData"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: unraveled-7b-dpo-lora\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""655e1d5f759563867174499c"", ""modelId"": ""dustydecapod/unraveled-7b-a1"", ""usedStorage"": 
28966962016}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=dustydecapod/unraveled-7b-a1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdustydecapod%2Funraveled-7b-a1%5D(%2Fdustydecapod%2Funraveled-7b-a1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 275 |
+
https://huggingface.co/Nitral-Archive/Kunocchini-1.2-7b-longtext-broken,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 276 |
+
Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e,"---
|
| 277 |
+
base_model: NousResearch/Yarn-Mistral-7b-128k
|
| 278 |
+
library_name: transformers
|
| 279 |
+
model_name: a91b1374-b60f-4823-9aad-5f976d07f08e
|
| 280 |
+
tags:
|
| 281 |
+
- generated_from_trainer
|
| 282 |
+
- axolotl
|
| 283 |
+
- dpo
|
| 284 |
+
- trl
|
| 285 |
+
licence: license
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
# Model Card for a91b1374-b60f-4823-9aad-5f976d07f08e
|
| 289 |
+
|
| 290 |
+
This model is a fine-tuned version of [NousResearch/Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k).
|
| 291 |
+
It has been trained using [TRL](https://github.com/huggingface/trl).
|
| 292 |
+
|
| 293 |
+
## Quick start
|
| 294 |
+
|
| 295 |
+
```python
|
| 296 |
+
from transformers import pipeline
|
| 297 |
+
|
| 298 |
+
question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
|
| 299 |
+
generator = pipeline(""text-generation"", model=""Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e"", device=""cuda"")
|
| 300 |
+
output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
|
| 301 |
+
print(output[""generated_text""])
|
| 302 |
+
```
|
| 303 |
+
|
| 304 |
+
## Training procedure
|
| 305 |
+
|
| 306 |
+
[<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""150"" height=""24""/>](https://wandb.ai/alphatao-alphatao/Gradients-On-Demand/runs/al93galt)
|
| 307 |
+
|
| 308 |
+
This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
|
| 309 |
+
|
| 310 |
+
### Framework versions
|
| 311 |
+
|
| 312 |
+
- TRL: 0.12.0.dev0
|
| 313 |
+
- Transformers: 4.46.0
|
| 314 |
+
- Pytorch: 2.5.0+cu124
|
| 315 |
+
- Datasets: 3.0.1
|
| 316 |
+
- Tokenizers: 0.20.1
|
| 317 |
+
|
| 318 |
+
## Citations
|
| 319 |
+
|
| 320 |
+
Cite DPO as:
|
| 321 |
+
|
| 322 |
+
```bibtex
|
| 323 |
+
@inproceedings{rafailov2023direct,
|
| 324 |
+
title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
|
| 325 |
+
author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
|
| 326 |
+
year = 2023,
|
| 327 |
+
booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
|
| 328 |
+
url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
|
| 329 |
+
editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
|
| 330 |
+
}
|
| 331 |
+
```
|
| 332 |
+
|
| 333 |
+
Cite TRL as:
|
| 334 |
+
|
| 335 |
+
```bibtex
|
| 336 |
+
@misc{vonwerra2022trl,
|
| 337 |
+
title = {{TRL: Transformer Reinforcement Learning}},
|
| 338 |
+
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
|
| 339 |
+
year = 2020,
|
| 340 |
+
journal = {GitHub repository},
|
| 341 |
+
publisher = {GitHub},
|
| 342 |
+
howpublished = {\url{https://github.com/huggingface/trl}}
|
| 343 |
+
}
|
| 344 |
+
```","{""id"": ""Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e"", ""author"": ""Alphatao"", ""sha"": ""a156ade398049a616e2de649d17d91da3245d17b"", ""last_modified"": ""2025-04-24 11:08:11+00:00"", ""created_at"": ""2025-04-24 07:41:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tensorboard"", ""safetensors"", ""mistral"", ""text-generation"", ""generated_from_trainer"", ""axolotl"", ""dpo"", ""trl"", ""conversational"", ""custom_code"", ""arxiv:2305.18290"", ""base_model:NousResearch/Yarn-Mistral-7b-128k"", ""base_model:finetune:NousResearch/Yarn-Mistral-7b-128k"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlibrary_name: transformers\nmodel_name: a91b1374-b60f-4823-9aad-5f976d07f08e\ntags:\n- generated_from_trainer\n- axolotl\n- dpo\n- trl\nlicence: license"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""auto_map"": {""AutoConfig"": ""NousResearch/Yarn-Mistral-7b-128k--configuration_mistral.MistralConfig"", ""AutoModelForCausalLM"": ""NousResearch/Yarn-Mistral-7b-128k--modeling_mistral_yarn.MistralForCausalLM""}, ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in 
loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr24_07-41-17_f44850f1953c/events.out.tfevents.1745480507.f44850f1953c.260.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-24 11:08:11+00:00"", ""cardData"": ""base_model: NousResearch/Yarn-Mistral-7b-128k\nlibrary_name: transformers\nmodel_name: a91b1374-b60f-4823-9aad-5f976d07f08e\ntags:\n- generated_from_trainer\n- axolotl\n- dpo\n- trl\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6809eb2d01de4afad6323264"", ""modelId"": ""Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e"", ""usedStorage"": 20532159292}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Alphatao/a91b1374-b60f-4823-9aad-5f976d07f08e&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BAlphatao%2Fa91b1374-b60f-4823-9aad-5f976d07f08e%5D(%2FAlphatao%2Fa91b1374-b60f-4823-9aad-5f976d07f08e)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
alpaca-native_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
chavinlo/alpaca-native,"# Stanford Alpaca
|
| 3 |
+
|
| 4 |
+
This is a replica of Alpaca by Stanford' tatsu
|
| 5 |
+
|
| 6 |
+
Trained using the original instructions with a minor modification in FSDP mode
|
| 7 |
+
|
| 8 |
+
# Other versions:
|
| 9 |
+
13B: https://huggingface.co/chavinlo/alpaca-13b
|
| 10 |
+
|
| 11 |
+
13B -> GPT4 : https://huggingface.co/chavinlo/gpt4-x-alpaca
|
| 12 |
+
|
| 13 |
+
## Compute Used
|
| 14 |
+
Trained on 4xA100s for 6H
|
| 15 |
+
Donated by redmond.ai
|
| 16 |
+
|
| 17 |
+
NO LORA HAS BEEN USED, this is a natively-finetuned model, hence ""alpaca-native""
|
| 18 |
+
|
| 19 |
+
If you are interested on more llama-based models, you can check out my profile or search for other models at https://huggingface.co/models?other=llama
|
| 20 |
+
|
| 21 |
+
This (MIGHT) be a quantized version of this model, but be careful: https://boards.4channel.org/g/thread/92173062#p92182396
|
| 22 |
+
|
| 23 |
+
CONFIGURATION (default except fsdp):
|
| 24 |
+
|
| 25 |
+
```shell
|
| 26 |
+
torchrun --nproc_per_node=4 --master_port=3045 train.py \
|
| 27 |
+
--model_name_or_path /workspace/llama-7b-hf \
|
| 28 |
+
--data_path ./alpaca_data.json \
|
| 29 |
+
--bf16 True \
|
| 30 |
+
--output_dir /workspace/output \
|
| 31 |
+
--num_train_epochs 3 \
|
| 32 |
+
--per_device_train_batch_size 4 \
|
| 33 |
+
--per_device_eval_batch_size 4 \
|
| 34 |
+
--gradient_accumulation_steps 8 \
|
| 35 |
+
--evaluation_strategy ""no"" \
|
| 36 |
+
--save_strategy ""steps"" \
|
| 37 |
+
--save_steps 200 \
|
| 38 |
+
--save_total_limit 1 \
|
| 39 |
+
--learning_rate 2e-5 \
|
| 40 |
+
--weight_decay 0. \
|
| 41 |
+
--warmup_ratio 0.03 \
|
| 42 |
+
--lr_scheduler_type ""cosine"" \
|
| 43 |
+
--logging_steps 1 \
|
| 44 |
+
--fsdp ""shard_grad_op auto_wrap"" \
|
| 45 |
+
--fsdp_transformer_layer_cls_to_wrap 'LLaMADecoderLayer' \
|
| 46 |
+
--tf32 True --report_to=""wandb""
|
| 47 |
+
```
|
| 48 |
+
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
| 49 |
+
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_chavinlo__alpaca-native)
|
| 50 |
+
|
| 51 |
+
| Metric | Value |
|
| 52 |
+
|-----------------------|---------------------------|
|
| 53 |
+
| Avg. | 41.96 |
|
| 54 |
+
| ARC (25-shot) | 52.3 |
|
| 55 |
+
| HellaSwag (10-shot) | 77.09 |
|
| 56 |
+
| MMLU (5-shot) | 41.6 |
|
| 57 |
+
| TruthfulQA (0-shot) | 37.58 |
|
| 58 |
+
| Winogrande (5-shot) | 69.46 |
|
| 59 |
+
| GSM8K (5-shot) | 1.44 |
|
| 60 |
+
| DROP (3-shot) | 14.23 |
|
| 61 |
+
","{""id"": ""chavinlo/alpaca-native"", ""author"": ""chavinlo"", ""sha"": ""3bf09cbff2fbd92d7d88a0f70ba24fca372befdf"", ""last_modified"": ""2023-11-17 23:10:27+00:00"", ""created_at"": ""2023-03-16 02:37:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2717, ""downloads_all_time"": null, ""likes"": 262, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": """", ""eos_token"": """", ""unk_token"": """"}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""llm-blender/LLM-Blender"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""neubla/neubla-llm-evaluation-board"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""rankun203/chavinlo-alpaca-native"", ""UniversE22/chavinlo-alpaca-native"", ""UniversE22/chavinlo-alpaca-native-gradio"", ""kwgjjeffrey/chavinlo-alpaca-native"", ""vincycode7/chavinlo-alpaca-native"", ""felixpie/chavinlo-alpaca-native"", ""baby1/chavinlo-alpaca-native"", ""baby1/chavinlo-alpaca-native2"", ""ArmanBM/chavinlo-alpaca-native"", ""arodriguez/chavinlo-alpaca-native"", ""darkd3vil/alpaca-native"", ""polaris-73/decodingtrust-demo"", ""AI-Secure/DecodingTrust-demo"", ""smothiki/open_llm_leaderboard"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-17 23:10:27+00:00"", ""cardData"": null, 
""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""641280e685e89e53c38f1e56"", ""modelId"": ""chavinlo/alpaca-native"", ""usedStorage"": 175200243880}",0,,0,"https://huggingface.co/shrenikb/fullfedtest, https://huggingface.co/shrenikb/fed16test, https://huggingface.co/Supriyayalavarthi/apaca-fine-tune, https://huggingface.co/Supriyayalavarthi/Serial-aplaca, https://huggingface.co/alexis07/alpaca-guanaco-spanish, https://huggingface.co/Keerthiyogan/Paraphrase-aplaca",6,"https://huggingface.co/mradermacher/alpaca-native-GGUF, https://huggingface.co/mradermacher/alpaca-native-i1-GGUF",2,,0,"AI-Secure/DecodingTrust-demo, BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, UniversE22/chavinlo-alpaca-native-gradio, Vikhrmodels/small-shlepa-lb, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=chavinlo/alpaca-native&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bchavinlo%2Falpaca-native%5D(%2Fchavinlo%2Falpaca-native)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, llm-blender/LLM-Blender",13
|
animatediff_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
guoyww/animatediff,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
---
|
| 5 |
+
This model repo is for [AnimateDiff](https://github.com/guoyww/animatediff/).","{""id"": ""guoyww/animatediff"", ""author"": ""guoyww"", ""sha"": ""fdfe36afa161e51b3e9c24022b0e368d59e7345e"", ""last_modified"": ""2023-12-18 16:12:10+00:00"", ""created_at"": ""2023-07-18 09:19:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 865, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mm_sd_v14.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mm_sd_v15.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mm_sd_v15_v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mm_sdxl_v10_beta.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_PanLeft.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_PanRight.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_RollingAnticlockwise.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_RollingClockwise.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_TiltDown.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_TiltUp.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_ZoomIn.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v2_lora_ZoomOut.ckpt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='v3_sd15_adapter.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v3_sd15_mm.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v3_sd15_sparsectrl_rgb.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='v3_sd15_sparsectrl_scribble.ckpt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""guoyww/AnimateDiff"", ""fffiloni/LatentSync"", ""alibaba-pai/EasyAnimate"", ""Fabrice-TIERCELIN/SUPIR"", ""depth-anything/Video-Depth-Anything"", ""alibaba-pai/CogVideoX-Fun-5b"", ""fffiloni/EchoMimic"", ""BestWishYsh/MagicTime"", ""BadToBest/EchoMimic"", ""fffiloni/echomimic-v2"", ""fffiloni/DiffuEraser-demo"", ""Doubiiu/TrajectoryCrafter"", ""alibaba-pai/Wan2.1-Fun-1.3B-InP"", ""vilarin/Diffutoon-ExVideo"", ""fffiloni/AnimateDiff-Image-Init"", ""FQiao/GenStereo"", ""Potre1qw/LatentSync"", ""svjack/Moore-AnimateAnyone"", ""SunderAli17/LatentSync"", ""Nymbo/Moore-AnimateAnyone"", ""svjack/musepose"", ""Krokodilpirat/Video-Depth-Anything_RGBD"", ""svjack/Hunyuan_Video_Lora_Demo"", ""fantaxy/EchoMimic"", ""kevinwang676/Diffutoon"", ""aleafy/RelightVid"", ""bep40/musepose"", ""kymlcode/AniPortrait_official"", ""fantaxy/ginfa2v"", ""maxdemon/CogVideoX-5B-Max-Fun"", ""Hyathi/SoundImage-LipSync"", ""Arkuuu777/LatentSync"", ""svjack/MotionClone-Text-to-Video"", ""dominic1021/LatentSync"", ""imjunaidafzal/AnimateDiff"", ""Joeythemonster/AnimateDiff"", ""Joeythemonster/Animatejoeythemonste"", ""PPLSWG/AnimateDiff"", ""Nymbo/MagicTime"", ""colornative/AnimateDiff"", ""Omnibus/AnimateDiff"", ""elanoqi/AnimateDiff"", ""stlaurentjr/RNPD"", ""DmitrMakeev/AnimateDiff"", ""raaraya/AnimateDiff"", ""chriscec/AnimateDiff"", ""SamuelDelgato/AnimateDiff"", ""3rdaiOhpinFully/AnimateDiff-Image-Init"", ""ArtioOfficial/AnimateDiff"", ""Shivamkak/EchoMimic"", ""yogabookuser/Moore-AnimateAnyone"", ""zmkktom/AniPortrait_official"", ""MihaiHuggingFace/CogVideoX-Fun-5b"", ""K00B404/CogVideoX-Fun-5b-custom"", 
""K00B404/EasyAnimate_custom"", ""Galasii/AniPortrait_official"", ""lixiaolin/AniPortrait_official"", ""Nymbo/EchoMimic"", ""latteisacat/AniPortrait_official"", ""meepmoo/vtesting93x"", ""meepmoo/vtesting2"", ""latte2512/AniPortrait_lafa"", ""marshal007/echomimicv2_test"", ""ginipick/ginfa2v"", ""Bread-F/Intelligent-Medical-Guidance-Large-Model"", ""Nymbo/LatentSync"", ""Francke/LatentSync"", ""luis1982/Moore-AnimateAnyone"", ""meepmoo/LatentSync"", ""AniruddhaChattopadhyay/LatentSync"", ""muchaco/LatentSync"", ""bkoos/LatentSync"", ""Maximofn/HunyuanVideo"", ""Statical-Archives/ANIMATEZERO"", ""svjack/DiffuEraser-demo"", ""MrDrmm/EasyAnimate"", ""waloneai/EasyAnimate"", ""ljc0506/AniPortrait_official"", ""pillaryao/demo"", ""CrisRaz/SUPIR"", ""TechSmashers/LatentSync"", ""stepbysteb/EasyAnimate"", ""David960/LatentSync"", ""luigi12345/LatentSync_Voice_Lips_Videogenerator"", ""svjack/LatentSync"", ""soiz1/ComfyUI-Demo"", ""Ascetu/LatentSync"", ""ValerianFourel/StableFaceEmotion"", ""svjack/MotionClone-Image-to-Video"", ""codewdhruv/latent-sync-testing"", ""Kouroshhhhh/HunyuanVideo"", ""kahramango/EasyAnimate"", ""HiPeking/latentSync"", ""Spanicin/aiavatarnew"", ""Moon11111/avatar"", ""aobatroz/LatentSync"", ""kishan1122/SUPIR"", ""cocktailpeanut/DiffuEraser-demo"", ""Riceok/RICE-Video-Depth-Anything-omg"", ""Civersia/genwarp""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-18 16:12:10+00:00"", ""cardData"": ""license: apache-2.0"", ""transformersInfo"": null, ""_id"": ""64b65935909fbefc923000c1"", ""modelId"": ""guoyww/animatediff"", ""usedStorage"": 32554557069}",0,,0,,0,,0,,0,"BadToBest/EchoMimic, BestWishYsh/MagicTime, Doubiiu/TrajectoryCrafter, FQiao/GenStereo, Fabrice-TIERCELIN/SUPIR, Potre1qw/LatentSync, SunderAli17/LatentSync, alibaba-pai/CogVideoX-Fun-5b, alibaba-pai/EasyAnimate, alibaba-pai/Wan2.1-Fun-1.3B-InP, depth-anything/Video-Depth-Anything, 
huggingface/InferenceSupport/discussions/new?title=guoyww/animatediff&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bguoyww%2Fanimatediff%5D(%2Fguoyww%2Fanimatediff)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, svjack/Moore-AnimateAnyone",13
|
anything-midjourney-v-4-1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Joeythemonster/anything-midjourney-v-4-1,"---
|
| 3 |
+
license: creativeml-openrail-m
|
| 4 |
+
tags:
|
| 5 |
+
- text-to-image
|
| 6 |
+
- stable-diffusion
|
| 7 |
+
---
|
| 8 |
+
### ANYTHING-MIDJOURNEY-V-4.1 Dreambooth model trained by Joeythemonster with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
|
| 12 |
+
Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
|
| 13 |
+
|
| 14 |
+
Sample pictures of this concept:
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
","{""id"": ""Joeythemonster/anything-midjourney-v-4-1"", ""author"": ""Joeythemonster"", ""sha"": ""30f10e5ddc00bd24fd939c0aee63f8ebf1be6a41"", ""last_modified"": ""2023-05-16 09:40:13+00:00"", ""created_at"": ""2022-12-24 21:28:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 281, ""downloads_all_time"": null, ""likes"": 176, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-image"", ""stable-diffusion"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- text-to-image\n- stable-diffusion"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ANYTHING_MIDJOURNEY_V_4.1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ANYTHING_MIDJOURNEY_V_4.1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Daniton/MidJourney"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""untovvn/Joeythemonster-anything-midjourney-v-4-1"", ""mindtube/Diffusion50XX"", ""okeanos/uptimefactoryai"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", 
""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""StealYourGhost/Joeythemonster-anything-midjourney-v-4-1"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""Lyra121/finetuned_diffusion"", ""lu2000/anything-midjourney-v4-1"", ""thelou1s/MidJourney"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Brofu/Joeythemonster-anything-midjourney-v-4-1"", ""Isotonic/image-generator"", ""vibhorvats/Joeythemonster-anything-midjourney-v-4-1"", ""ALSv/midjourney-v4-1"", ""PhotoPranab/Joeythemonster-anything-midjourney-v-4-1"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/ArtModels"", ""Ttss4422/Joeythemonster-anything-midjourney-v-4"", ""Laden0p/Joeythemonster-anything-midjourney-v-4-1"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""Feifei315/Joeythemonster-anything-midjourney-v-4-1"", ""marketono/MidJourney"", ""ennov8ion/Landscapes-models"", ""FreeAiPlease/Joeythemonster-anything-midjourney-v-4-1"", ""Keyurmistry/Joeythemonster-anything-midjourney-v-4-1"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", 
""sk16er/ghibli_creator"", ""stanciu/Joeythemonster-anything-midjourney-v-4-1"", ""charanhu/anything-midjourney-v-4-1"", ""merlinux/Joeythemonster-anything-midjourney-v-4-1"", ""qouixster/Joeythemonster-anything-midjourney-v-4-1"", ""zox47/Joeythemonster-anything-midjourney-v-4-1"", ""hakanwkwjbwbs/Joeythemonster-anything-midjourney-v-4-1"", ""rubberboy/Joeythemonster-anything-midjourney-v-4-1"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""up0601/Joeythemonster-anything-midjourney-v-4-1"", ""GFXY/Joeythemonster-anything-midjourney-v-4-1"", ""fuxkplugg/Joeythemonster-anything-midjourney-v-4-1"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-16 09:40:13+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- text-to-image\n- stable-diffusion"", ""transformersInfo"": null, ""_id"": ""63a76f15c0cbed8518d79963"", ""modelId"": ""Joeythemonster/anything-midjourney-v-4-1"", ""usedStorage"": 14735779154}",0,,0,"https://huggingface.co/anantk/textual_inversion_cream_mid, https://huggingface.co/anantk/textual_inversion_cream_pixart_v1",2,,0,,0,"Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=Joeythemonster/anything-midjourney-v-4-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJoeythemonster%2Fanything-midjourney-v-4-1%5D(%2FJoeythemonster%2Fanything-midjourney-v-4-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, 
phenixrhyder/NSFW-ToyWorld",13
|
bart-large-cnn_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
bge-large-en_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
bge-reranker-large_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
BAAI/bge-reranker-large,"---
|
| 3 |
+
license: mit
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
- zh
|
| 7 |
+
tags:
|
| 8 |
+
- mteb
|
| 9 |
+
model-index:
|
| 10 |
+
- name: bge-reranker-base
|
| 11 |
+
results:
|
| 12 |
+
- task:
|
| 13 |
+
type: Reranking
|
| 14 |
+
dataset:
|
| 15 |
+
type: C-MTEB/CMedQAv1-reranking
|
| 16 |
+
name: MTEB CMedQAv1
|
| 17 |
+
config: default
|
| 18 |
+
split: test
|
| 19 |
+
revision: None
|
| 20 |
+
metrics:
|
| 21 |
+
- type: map
|
| 22 |
+
value: 81.27206722525007
|
| 23 |
+
- type: mrr
|
| 24 |
+
value: 84.14238095238095
|
| 25 |
+
- task:
|
| 26 |
+
type: Reranking
|
| 27 |
+
dataset:
|
| 28 |
+
type: C-MTEB/CMedQAv2-reranking
|
| 29 |
+
name: MTEB CMedQAv2
|
| 30 |
+
config: default
|
| 31 |
+
split: test
|
| 32 |
+
revision: None
|
| 33 |
+
metrics:
|
| 34 |
+
- type: map
|
| 35 |
+
value: 84.10369934291236
|
| 36 |
+
- type: mrr
|
| 37 |
+
value: 86.79376984126984
|
| 38 |
+
- task:
|
| 39 |
+
type: Reranking
|
| 40 |
+
dataset:
|
| 41 |
+
type: C-MTEB/Mmarco-reranking
|
| 42 |
+
name: MTEB MMarcoReranking
|
| 43 |
+
config: default
|
| 44 |
+
split: dev
|
| 45 |
+
revision: None
|
| 46 |
+
metrics:
|
| 47 |
+
- type: map
|
| 48 |
+
value: 35.4600511272538
|
| 49 |
+
- type: mrr
|
| 50 |
+
value: 34.60238095238095
|
| 51 |
+
- task:
|
| 52 |
+
type: Reranking
|
| 53 |
+
dataset:
|
| 54 |
+
type: C-MTEB/T2Reranking
|
| 55 |
+
name: MTEB T2Reranking
|
| 56 |
+
config: default
|
| 57 |
+
split: dev
|
| 58 |
+
revision: None
|
| 59 |
+
metrics:
|
| 60 |
+
- type: map
|
| 61 |
+
value: 67.27728847727172
|
| 62 |
+
- type: mrr
|
| 63 |
+
value: 77.1315192743764
|
| 64 |
+
pipeline_tag: feature-extraction
|
| 65 |
+
---
|
| 66 |
+
|
| 67 |
+
**We have updated the [new reranker](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), supporting larger lengths, more languages, and achieving better performance.**
|
| 68 |
+
|
| 69 |
+
<h1 align=""center"">FlagEmbedding</h1>
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
<h4 align=""center"">
|
| 73 |
+
<p>
|
| 74 |
+
<a href=#model-list>Model List</a> |
|
| 75 |
+
<a href=#frequently-asked-questions>FAQ</a> |
|
| 76 |
+
<a href=#usage>Usage</a> |
|
| 77 |
+
<a href=""#evaluation"">Evaluation</a> |
|
| 78 |
+
<a href=""#train"">Train</a> |
|
| 79 |
+
<a href=""#citation"">Citation</a> |
|
| 80 |
+
<a href=""#license"">License</a>
|
| 81 |
+
<p>
|
| 82 |
+
</h4>
|
| 83 |
+
|
| 84 |
+
**More details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding).**
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
[English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
FlagEmbedding focuses on retrieval-augmented LLMs, consisting of the following projects currently:
|
| 91 |
+
|
| 92 |
+
- **Long-Context LLM**: [Activation Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon)
|
| 93 |
+
- **Fine-tuning of LM** : [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail)
|
| 94 |
+
- **Embedding Model**: [Visualized-BGE](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/visual), [BGE-M3](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3), [LLM Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), [BGE Embedding](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/baai_general_embedding)
|
| 95 |
+
- **Reranker Model**: [llm rerankers](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), [BGE Reranker](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)
|
| 96 |
+
- **Benchmark**: [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB)
|
| 97 |
+
|
| 98 |
+
## News
|
| 99 |
+
- 3/18/2024: Release new [rerankers](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), built upon powerful M3 and LLM (GEMMA and MiniCPM, not so large actually) backbones, supporitng multi-lingual processing and larger inputs, massive improvements of ranking performances on BEIR, C-MTEB/Retrieval, MIRACL, LlamaIndex Evaluation.
|
| 100 |
+
- 3/18/2024: Release [Visualized-BGE](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/visual), equipping BGE with visual capabilities. Visualized-BGE can be utilized to generate embeddings for hybrid image-text data.
|
| 101 |
+
- 1/30/2024: Release **BGE-M3**, a new member to BGE model series! M3 stands for **M**ulti-linguality (100+ languages), **M**ulti-granularities (input length up to 8192), **M**ulti-Functionality (unification of dense, lexical, multi-vec/colbert retrieval).
|
| 102 |
+
It is the first embedding model which supports all three retrieval methods, achieving new SOTA on multi-lingual (MIRACL) and cross-lingual (MKQA) benchmarks.
|
| 103 |
+
[Technical Report](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/BGE_M3/BGE_M3.pdf) and [Code](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3). :fire:
|
| 104 |
+
- 1/9/2024: Release [Activation-Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon), an effective, efficient, compatible, and low-cost (training) method to extend the context length of LLM. [Technical Report](https://arxiv.org/abs/2401.03462) :fire:
|
| 105 |
+
- 12/24/2023: Release **LLaRA**, a LLaMA-7B based dense retriever, leading to state-of-the-art performances on MS MARCO and BEIR. Model and code will be open-sourced. Please stay tuned. [Technical Report](https://arxiv.org/abs/2312.15503)
|
| 106 |
+
- 11/23/2023: Release [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail), a method to maintain general capabilities during fine-tuning by merging multiple language models. [Technical Report](https://arxiv.org/abs/2311.13534) :fire:
|
| 107 |
+
- 10/12/2023: Release [LLM-Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), a unified embedding model to support diverse retrieval augmentation needs for LLMs. [Technical Report](https://arxiv.org/pdf/2310.07554.pdf)
|
| 108 |
+
- 09/15/2023: The [technical report](https://arxiv.org/pdf/2309.07597.pdf) of BGE has been released
|
| 109 |
+
- 09/15/2023: The [massive training data](https://data.baai.ac.cn/details/BAAI-MTP) of BGE has been released
|
| 110 |
+
- 09/12/2023: New models:
|
| 111 |
+
- **New reranker model**: release cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than embedding model. We recommend to use/fine-tune them to re-rank top-k documents returned by embedding models.
|
| 112 |
+
- **update embedding model**: release `bge-*-v1.5` embedding model to alleviate the issue of the similarity distribution, and enhance its retrieval ability without instruction.
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
<details>
|
| 116 |
+
<summary>More</summary>
|
| 117 |
+
<!-- ### More -->
|
| 118 |
+
|
| 119 |
+
- 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): Add script to mine hard negatives and support adding instruction during fine-tuning.
|
| 120 |
+
- 08/09/2023: BGE Models are integrated into **Langchain**, you can use it like [this](#using-langchain); C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard).
|
| 121 |
+
- 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗**
|
| 122 |
+
- 08/02/2023: Release `bge-large-*`(short for BAAI General Embedding) Models, **rank 1st on MTEB and C-MTEB benchmark!** :tada: :tada:
|
| 123 |
+
- 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test dataset.
|
| 124 |
+
|
| 125 |
+
</details>
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
## Model List
|
| 129 |
+
|
| 130 |
+
`bge` is short for `BAAI general embedding`.
|
| 131 |
+
|
| 132 |
+
| Model | Language | | Description | query instruction for retrieval [1] |
|
| 133 |
+
|:-------------------------------|:--------:| :--------:| :--------:|:--------:|
|
| 134 |
+
| [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | [Inference](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3#usage) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3) | Multi-Functionality(dense retrieval, sparse retrieval, multi-vector(colbert)), Multi-Linguality, and Multi-Granularity(8192 tokens) | |
|
| 135 |
+
| [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) |
|
| 136 |
+
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
|
| 137 |
+
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
|
| 138 |
+
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
|
| 139 |
+
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
|
| 140 |
+
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
|
| 141 |
+
| [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
|
| 142 |
+
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
|
| 143 |
+
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
|
| 144 |
+
| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |
|
| 145 |
+
| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` |
|
| 146 |
+
| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) |a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |
|
| 147 |
+
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |
|
| 148 |
+
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |
|
| 149 |
+
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
[1\]: If you need to search the relevant passages to a query, we suggest to add the instruction to the query; in other cases, no instruction is needed, just use the original query directly. In all cases, **no instruction** needs to be added to passages.
|
| 153 |
+
|
| 154 |
+
[2\]: Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. To balance the accuracy and time cost, cross-encoder is widely used to re-rank top-k documents retrieved by other simple models.
|
| 155 |
+
For example, use the bge embedding model to retrieve the top 100 relevant documents, and then use the bge reranker to re-rank those 100 documents to get the final top-3 results.
|
| 156 |
+
|
| 157 |
+
All models have been uploaded to Huggingface Hub, and you can see them at https://huggingface.co/BAAI.
|
| 158 |
+
If you cannot open the Huggingface Hub, you also can download the models at https://model.baai.ac.cn/models .
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
## Frequently asked questions
|
| 162 |
+
|
| 163 |
+
<details>
|
| 164 |
+
<summary>1. How to fine-tune bge embedding model?</summary>
|
| 165 |
+
|
| 166 |
+
<!-- ### How to fine-tune bge embedding model? -->
|
| 167 |
+
Following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model.
|
| 168 |
+
Some suggestions:
|
| 169 |
+
- Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve the retrieval performance.
|
| 170 |
+
- If you pre-train bge on your data, the pre-trained model cannot be directly used to calculate similarity, and it must be fine-tuned with contrastive learning before computing similarity.
|
| 171 |
+
- If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank top-k results.
|
| 172 |
+
Hard negatives are also needed to fine-tune the reranker. Refer to this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) for fine-tuning the reranker.
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
</details>
|
| 176 |
+
|
| 177 |
+
<details>
|
| 178 |
+
<summary>2. The similarity score between two dissimilar sentences is higher than 0.5</summary>
|
| 179 |
+
|
| 180 |
+
<!-- ### The similarity score between two dissimilar sentences is higher than 0.5 -->
|
| 181 |
+
**Suggest to use bge v1.5, which alleviates the issue of the similarity distribution.**
|
| 182 |
+
|
| 183 |
+
Since we finetune the models by contrastive learning with a temperature of 0.01,
|
| 184 |
+
the similarity distribution of the current BGE model is about in the interval \[0.6, 1\].
|
| 185 |
+
So a similarity score greater than 0.5 does not indicate that the two sentences are similar.
|
| 186 |
+
|
| 187 |
+
For downstream tasks, such as passage retrieval or semantic similarity,
|
| 188 |
+
**what matters is the relative order of the scores, not the absolute value.**
|
| 189 |
+
If you need to filter similar sentences based on a similarity threshold,
|
| 190 |
+
please select an appropriate similarity threshold based on the similarity distribution on your data (such as 0.8, 0.85, or even 0.9).
|
| 191 |
+
|
| 192 |
+
</details>
|
| 193 |
+
|
| 194 |
+
<details>
|
| 195 |
+
<summary>3. When does the query instruction need to be used</summary>
|
| 196 |
+
|
| 197 |
+
<!-- ### When does the query instruction need to be used -->
|
| 198 |
+
|
| 199 |
+
For the `bge-*-v1.5`, we improve its retrieval ability when not using instruction.
|
| 200 |
+
Using no instruction causes only a slight degradation in retrieval performance compared with using an instruction.
|
| 201 |
+
So you can generate embedding without instruction in all cases for convenience.
|
| 202 |
+
|
| 203 |
+
For a retrieval task that uses short queries to find long related documents,
|
| 204 |
+
it is recommended to add instructions for these short queries.
|
| 205 |
+
**The best method to decide whether to add instructions for queries is choosing the setting that achieves better performance on your task.**
|
| 206 |
+
In all cases, the documents/passages do not need to add the instruction.
|
| 207 |
+
|
| 208 |
+
</details>
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
## Usage
|
| 212 |
+
|
| 213 |
+
### Usage for Embedding Model
|
| 214 |
+
|
| 215 |
+
Here are some examples for using `bge` models with
|
| 216 |
+
[FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers).
|
| 217 |
+
|
| 218 |
+
#### Using FlagEmbedding
|
| 219 |
+
```
|
| 220 |
+
pip install -U FlagEmbedding
|
| 221 |
+
```
|
| 222 |
+
If it doesn't work for you, you can see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding.
|
| 223 |
+
|
| 224 |
+
```python
|
| 225 |
+
from FlagEmbedding import FlagModel
|
| 226 |
+
sentences_1 = [""样例数据-1"", ""样例数据-2""]
|
| 227 |
+
sentences_2 = [""样例数据-3"", ""样例数据-4""]
|
| 228 |
+
model = FlagModel('BAAI/bge-large-zh-v1.5',
|
| 229 |
+
query_instruction_for_retrieval=""为这个句子生成表示以用于检索相关文章:"",
|
| 230 |
+
use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
| 231 |
+
embeddings_1 = model.encode(sentences_1)
|
| 232 |
+
embeddings_2 = model.encode(sentences_2)
|
| 233 |
+
similarity = embeddings_1 @ embeddings_2.T
|
| 234 |
+
print(similarity)
|
| 235 |
+
|
| 236 |
+
# for s2p(short query to long passage) retrieval task, suggest to use encode_queries() which will automatically add the instruction to each query
|
| 237 |
+
# corpus in retrieval task can still use encode() or encode_corpus(), since they don't need instruction
|
| 238 |
+
queries = ['query_1', 'query_2']
|
| 239 |
+
passages = [""样例文档-1"", ""样例文档-2""]
|
| 240 |
+
q_embeddings = model.encode_queries(queries)
|
| 241 |
+
p_embeddings = model.encode(passages)
|
| 242 |
+
scores = q_embeddings @ p_embeddings.T
|
| 243 |
+
```
|
| 244 |
+
For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list).
|
| 245 |
+
|
| 246 |
+
By default, FlagModel will use all available GPUs when encoding. Please set `os.environ[""CUDA_VISIBLE_DEVICES""]` to select specific GPUs.
|
| 247 |
+
You also can set `os.environ[""CUDA_VISIBLE_DEVICES""]=""""` to make all GPUs unavailable.
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
#### Using Sentence-Transformers
|
| 251 |
+
|
| 252 |
+
You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net):
|
| 253 |
+
|
| 254 |
+
```
|
| 255 |
+
pip install -U sentence-transformers
|
| 256 |
+
```
|
| 257 |
+
```python
|
| 258 |
+
from sentence_transformers import SentenceTransformer
|
| 259 |
+
sentences_1 = [""样例数据-1"", ""样例数据-2""]
|
| 260 |
+
sentences_2 = [""样例数据-3"", ""样例数据-4""]
|
| 261 |
+
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
|
| 262 |
+
embeddings_1 = model.encode(sentences_1, normalize_embeddings=True)
|
| 263 |
+
embeddings_2 = model.encode(sentences_2, normalize_embeddings=True)
|
| 264 |
+
similarity = embeddings_1 @ embeddings_2.T
|
| 265 |
+
print(similarity)
|
| 266 |
+
```
|
| 267 |
+
For s2p(short query to long passage) retrieval task,
|
| 268 |
+
each short query should start with an instruction (instructions see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)).
|
| 269 |
+
But the instruction is not needed for passages.
|
| 270 |
+
```python
|
| 271 |
+
from sentence_transformers import SentenceTransformer
|
| 272 |
+
queries = ['query_1', 'query_2']
|
| 273 |
+
passages = [""样例文档-1"", ""样例文档-2""]
|
| 274 |
+
instruction = ""为这个句子生成表示以用于检索相关文章:""
|
| 275 |
+
|
| 276 |
+
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
|
| 277 |
+
q_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True)
|
| 278 |
+
p_embeddings = model.encode(passages, normalize_embeddings=True)
|
| 279 |
+
scores = q_embeddings @ p_embeddings.T
|
| 280 |
+
```
|
| 281 |
+
|
| 282 |
+
#### Using Langchain
|
| 283 |
+
|
| 284 |
+
You can use `bge` in langchain like this:
|
| 285 |
+
```python
|
| 286 |
+
from langchain.embeddings import HuggingFaceBgeEmbeddings
|
| 287 |
+
model_name = ""BAAI/bge-large-en-v1.5""
|
| 288 |
+
model_kwargs = {'device': 'cuda'}
|
| 289 |
+
encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
|
| 290 |
+
model = HuggingFaceBgeEmbeddings(
|
| 291 |
+
model_name=model_name,
|
| 292 |
+
model_kwargs=model_kwargs,
|
| 293 |
+
encode_kwargs=encode_kwargs,
|
| 294 |
+
query_instruction=""为这个句子生成表示以用于检索相关文章:""
|
| 295 |
+
)
|
| 296 |
+
model.query_instruction = ""为这个句子生成表示以用于检索相关文章:""
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
#### Using HuggingFace Transformers
|
| 301 |
+
|
| 302 |
+
With the transformers package, you can use the model like this: First, you pass your input through the transformer model, then you select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding.
|
| 303 |
+
|
| 304 |
+
```python
|
| 305 |
+
from transformers import AutoTokenizer, AutoModel
|
| 306 |
+
import torch
|
| 307 |
+
# Sentences we want sentence embeddings for
|
| 308 |
+
sentences = [""样例数据-1"", ""样例数据-2""]
|
| 309 |
+
|
| 310 |
+
# Load model from HuggingFace Hub
|
| 311 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5')
|
| 312 |
+
model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5')
|
| 313 |
+
model.eval()
|
| 314 |
+
|
| 315 |
+
# Tokenize sentences
|
| 316 |
+
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
|
| 317 |
+
# for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages)
|
| 318 |
+
# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')
|
| 319 |
+
|
| 320 |
+
# Compute token embeddings
|
| 321 |
+
with torch.no_grad():
|
| 322 |
+
model_output = model(**encoded_input)
|
| 323 |
+
# Perform pooling. In this case, cls pooling.
|
| 324 |
+
sentence_embeddings = model_output[0][:, 0]
|
| 325 |
+
# normalize embeddings
|
| 326 |
+
sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
|
| 327 |
+
print(""Sentence embeddings:"", sentence_embeddings)
|
| 328 |
+
```
|
| 329 |
+
|
| 330 |
+
### Usage for Reranker
|
| 331 |
+
|
| 332 |
+
Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding.
|
| 333 |
+
You can get a relevance score by inputting query and passage to the reranker.
|
| 334 |
+
The reranker is optimized based on cross-entropy loss, so the relevance score is not bounded to a specific range.
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
#### Using FlagEmbedding
|
| 338 |
+
```
|
| 339 |
+
pip install -U FlagEmbedding
|
| 340 |
+
```
|
| 341 |
+
|
| 342 |
+
Get relevance scores (higher scores indicate more relevance):
|
| 343 |
+
```python
|
| 344 |
+
from FlagEmbedding import FlagReranker
|
| 345 |
+
reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
| 346 |
+
|
| 347 |
+
score = reranker.compute_score(['query', 'passage'])
|
| 348 |
+
print(score)
|
| 349 |
+
|
| 350 |
+
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
|
| 351 |
+
print(scores)
|
| 352 |
+
```
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
#### Using Huggingface transformers
|
| 356 |
+
|
| 357 |
+
```python
|
| 358 |
+
import torch
|
| 359 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 360 |
+
|
| 361 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large')
|
| 362 |
+
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large')
|
| 363 |
+
model.eval()
|
| 364 |
+
|
| 365 |
+
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
|
| 366 |
+
with torch.no_grad():
|
| 367 |
+
inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
|
| 368 |
+
scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
|
| 369 |
+
print(scores)
|
| 370 |
+
```
|
| 371 |
+
|
| 372 |
+
#### Using the reranker with the ONNX files
|
| 373 |
+
|
| 374 |
+
```python
|
| 375 |
+
from optimum.onnxruntime import ORTModelForSequenceClassification # type: ignore
|
| 376 |
+
|
| 377 |
+
import torch
|
| 378 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 379 |
+
|
| 380 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large')
|
| 381 |
+
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-base')
|
| 382 |
+
model_ort = ORTModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-base', file_name=""onnx/model.onnx"")
|
| 383 |
+
|
| 384 |
+
# Sentences we want sentence embeddings for
|
| 385 |
+
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
|
| 386 |
+
|
| 387 |
+
# Tokenize sentences
|
| 388 |
+
encoded_input = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt')
|
| 389 |
+
|
| 390 |
+
scores_ort = model_ort(**encoded_input, return_dict=True).logits.view(-1, ).float()
|
| 391 |
+
# Compute token embeddings
|
| 392 |
+
with torch.inference_mode():
|
| 393 |
+
scores = model_ort(**encoded_input, return_dict=True).logits.view(-1, ).float()
|
| 394 |
+
|
| 395 |
+
# scores and scores_ort are identical
|
| 396 |
+
```
|
| 397 |
+
#### Using the reranker with infinity
|
| 398 |
+
|
| 399 |
+
It's also possible to deploy the onnx/torch files with the [infinity_emb](https://github.com/michaelfeil/infinity) pip package.
|
| 400 |
+
```python
|
| 401 |
+
import asyncio
|
| 402 |
+
from infinity_emb import AsyncEmbeddingEngine, EngineArgs
|
| 403 |
+
|
| 404 |
+
query='what is a panda?'
|
| 405 |
+
docs = ['The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear', ""Paris is in France.""]
|
| 406 |
+
|
| 407 |
+
engine = AsyncEmbeddingEngine.from_args(
|
| 408 |
+
EngineArgs(model_name_or_path = ""BAAI/bge-reranker-base"", device=""cpu"", engine=""torch"" # or engine=""optimum"" for onnx
|
| 409 |
+
))
|
| 410 |
+
|
| 411 |
+
async def main():
|
| 412 |
+
async with engine:
|
| 413 |
+
ranking, usage = await engine.rerank(query=query, docs=docs)
|
| 414 |
+
print(list(zip(ranking, docs)))
|
| 415 |
+
asyncio.run(main())
|
| 416 |
+
```
|
| 417 |
+
|
| 418 |
+
## Evaluation
|
| 419 |
+
|
| 420 |
+
`baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!**
|
| 421 |
+
For more details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md).
|
| 422 |
+
|
| 423 |
+
- **MTEB**:
|
| 424 |
+
|
| 425 |
+
| Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) |
|
| 426 |
+
|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
| 427 |
+
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 |
|
| 428 |
+
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 |
|
| 429 |
+
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 |51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 |
|
| 430 |
+
| [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 |
|
| 431 |
+
| [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 |
|
| 432 |
+
| [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 |
|
| 433 |
+
| [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 |
|
| 434 |
+
| [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 |
|
| 435 |
+
| [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 |
|
| 436 |
+
| [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 |
|
| 437 |
+
| [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 |
|
| 438 |
+
| [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 |
|
| 439 |
+
| [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 |
|
| 440 |
+
| [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 |
|
| 441 |
+
| [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 |
|
| 442 |
+
| [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 |
|
| 443 |
+
| [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 |
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
- **C-MTEB**:
|
| 448 |
+
We create the benchmark C-MTEB for Chinese text embedding which consists of 31 datasets from 6 tasks.
|
| 449 |
+
Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction.
|
| 450 |
+
|
| 451 |
+
| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering |
|
| 452 |
+
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
|
| 453 |
+
| [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 |
|
| 454 |
+
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 |
|
| 455 |
+
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 |
|
| 456 |
+
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 |
|
| 457 |
+
| [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 |
|
| 458 |
+
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 |
|
| 459 |
+
| [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 |
|
| 460 |
+
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 |
|
| 461 |
+
| [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 |
|
| 462 |
+
| [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 |
|
| 463 |
+
| [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 |
|
| 464 |
+
| [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 |
|
| 465 |
+
| [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 69.56 | 64.31 | 54.28 | 45.68 |
|
| 466 |
+
| [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 |
|
| 467 |
+
| [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 |
|
| 468 |
+
| [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 |
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
- **Reranking**:
|
| 472 |
+
See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for evaluation script.
|
| 473 |
+
|
| 474 |
+
| Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
|
| 475 |
+
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
|
| 476 |
+
| text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 |
|
| 477 |
+
| multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 |
|
| 478 |
+
| multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 |
|
| 479 |
+
| multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 |
|
| 480 |
+
| m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 |
|
| 481 |
+
| m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 |
|
| 482 |
+
| bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 |
|
| 483 |
+
| bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 |
|
| 484 |
+
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 |
|
| 485 |
+
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 |
|
| 486 |
+
|
| 487 |
+
\* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks
|
| 488 |
+
|
| 489 |
+
## Train
|
| 490 |
+
|
| 491 |
+
### BAAI Embedding
|
| 492 |
+
|
| 493 |
+
We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pairs data using contrastive learning.
|
| 494 |
+
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).**
|
| 495 |
+
We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain).
|
| 496 |
+
Note that the goal of pre-training is to reconstruct the text, and the pre-trained model cannot be used for similarity calculation directly, it needs to be fine-tuned.
|
| 497 |
+
More training details for bge see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md).
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
### BGE Reranker
|
| 502 |
+
|
| 503 |
+
Cross-encoder will perform full-attention over the input pair,
|
| 504 |
+
which is more accurate than embedding model (i.e., bi-encoder) but more time-consuming than embedding model.
|
| 505 |
+
Therefore, it can be used to re-rank the top-k documents returned by embedding model.
|
| 506 |
+
We train the cross-encoder on multilingual pair data.
|
| 507 |
+
The data format is the same as embedding model, so you can fine-tune it easily following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker).
|
| 508 |
+
More details please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
## Citation
|
| 513 |
+
|
| 514 |
+
If you find this repository useful, please consider giving a star :star: and citation
|
| 515 |
+
|
| 516 |
+
```
|
| 517 |
+
@misc{bge_embedding,
|
| 518 |
+
title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
|
| 519 |
+
author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
|
| 520 |
+
year={2023},
|
| 521 |
+
eprint={2309.07597},
|
| 522 |
+
archivePrefix={arXiv},
|
| 523 |
+
primaryClass={cs.CL}
|
| 524 |
+
}
|
| 525 |
+
```
|
| 526 |
+
|
| 527 |
+
## License
|
| 528 |
+
FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.","{""id"": ""BAAI/bge-reranker-large"", ""author"": ""BAAI"", ""sha"": ""55611d7bca2a7133960a6d3b71e083071bbfc312"", ""last_modified"": ""2024-05-11 13:39:02+00:00"", ""created_at"": ""2023-09-12 07:39:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 467495, ""downloads_all_time"": null, ""likes"": 396, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""onnx"", ""safetensors"", ""xlm-roberta"", ""text-classification"", ""mteb"", ""feature-extraction"", ""en"", ""zh"", ""arxiv:2401.03462"", ""arxiv:2312.15503"", ""arxiv:2311.13534"", ""arxiv:2310.07554"", ""arxiv:2309.07597"", ""license:mit"", ""model-index"", ""autotrain_compatible"", ""text-embeddings-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""feature-extraction"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense: mit\npipeline_tag: feature-extraction\ntags:\n- mteb\nmodel-index:\n- name: bge-reranker-base\n results:\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv1\n type: C-MTEB/CMedQAv1-reranking\n config: default\n split: test\n revision: None\n metrics:\n - type: map\n value: 81.27206722525007\n verified: false\n - type: mrr\n value: 84.14238095238095\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv2\n type: C-MTEB/CMedQAv2-reranking\n config: default\n split: test\n revision: None\n metrics:\n - type: map\n value: 84.10369934291236\n verified: false\n - type: mrr\n value: 86.79376984126984\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB MMarcoReranking\n type: C-MTEB/Mmarco-reranking\n config: default\n split: dev\n revision: None\n metrics:\n 
- type: map\n value: 35.4600511272538\n verified: false\n - type: mrr\n value: 34.60238095238095\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB T2Reranking\n type: C-MTEB/T2Reranking\n config: default\n split: dev\n revision: None\n metrics:\n - type: map\n value: 67.27728847727172\n verified: false\n - type: mrr\n value: 77.1315192743764\n verified: false"", ""widget_data"": null, ""model_index"": [{""name"": ""bge-reranker-base"", ""results"": [{""task"": {""type"": ""Reranking""}, ""dataset"": {""type"": ""C-MTEB/CMedQAv1-reranking"", ""name"": ""MTEB CMedQAv1"", ""config"": ""default"", ""split"": ""test"", ""revision"": ""None""}, ""metrics"": [{""type"": ""map"", ""value"": 81.27206722525007, ""verified"": false}, {""type"": ""mrr"", ""value"": 84.14238095238095, ""verified"": false}]}, {""task"": {""type"": ""Reranking""}, ""dataset"": {""type"": ""C-MTEB/CMedQAv2-reranking"", ""name"": ""MTEB CMedQAv2"", ""config"": ""default"", ""split"": ""test"", ""revision"": ""None""}, ""metrics"": [{""type"": ""map"", ""value"": 84.10369934291236, ""verified"": false}, {""type"": ""mrr"", ""value"": 86.79376984126984, ""verified"": false}]}, {""task"": {""type"": ""Reranking""}, ""dataset"": {""type"": ""C-MTEB/Mmarco-reranking"", ""name"": ""MTEB MMarcoReranking"", ""config"": ""default"", ""split"": ""dev"", ""revision"": ""None""}, ""metrics"": [{""type"": ""map"", ""value"": 35.4600511272538, ""verified"": false}, {""type"": ""mrr"", ""value"": 34.60238095238095, ""verified"": false}]}, {""task"": {""type"": ""Reranking""}, ""dataset"": {""type"": ""C-MTEB/T2Reranking"", ""name"": ""MTEB T2Reranking"", ""config"": ""default"", ""split"": ""dev"", ""revision"": ""None""}, ""metrics"": [{""type"": ""map"", ""value"": 67.27728847727172, ""verified"": false}, {""type"": ""mrr"", ""value"": 77.1315192743764, ""verified"": false}]}]}], ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", 
""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": {""__type"": ""AddedToken"", ""content"": ""<mask>"", ""lstrip"": true, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx_data', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mteb/leaderboard"", ""mteb/leaderboard_legacy"", ""lightmate/llm-chatbot"", ""Thun09/leaderboard_demo"", ""sambanovasystems/enterprise_knowledge_retriever"", ""Zulelee/langchain-chatchat"", ""rk68/HyPA-RAG"", ""malvika2003/openvino_notebooks"", ""LISA-Kadi/LISA-demo"", ""CjangCjengh/Prompt-Compression-Toolbox"", ""gauravprasadgp/genai"", ""JerryLiJinyi/Prompt-Compression-Toolbox"", ""z00mP/Simple-RAG-solution"", ""LevGervich/rag_time"", ""acrobatlm/ai_bootcamp_midterm"", ""lillybak/NVIDIA-RAG"", ""waleko/rag-transformers"", ""egoz/myrag"", 
""berliozmeister/study"", ""JoseAntonioBarrancoBernabe/TFMUOC"", ""ApInvent/hw-gradio-ui"", ""kartavya23/GitChat"", ""anthopit/octovision-rag"", ""ZoniaChatbot/ZoniaQwen"", ""JiakaiDu/RAG_Test"", ""anikettty/blackbox"", ""holistic-ai/HyPA-RAG"", ""MaksymOrlianskyi/NLP6"", ""DenysPetro/RAG_NLP"", ""sq66/leaderboard_legacy"", ""tanbushi/reranker"", ""zhuhai111/Toursim-Test"", ""SmileXing/leaderboard"", ""q275343119/leaderboard""], ""safetensors"": {""parameters"": {""I64"": 514, ""F32"": 559891457}, ""total"": 559891971}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-11 13:39:02+00:00"", ""cardData"": ""language:\n- en\n- zh\nlicense: mit\npipeline_tag: feature-extraction\ntags:\n- mteb\nmodel-index:\n- name: bge-reranker-base\n results:\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv1\n type: C-MTEB/CMedQAv1-reranking\n config: default\n split: test\n revision: None\n metrics:\n - type: map\n value: 81.27206722525007\n verified: false\n - type: mrr\n value: 84.14238095238095\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv2\n type: C-MTEB/CMedQAv2-reranking\n config: default\n split: test\n revision: None\n metrics:\n - type: map\n value: 84.10369934291236\n verified: false\n - type: mrr\n value: 86.79376984126984\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB MMarcoReranking\n type: C-MTEB/Mmarco-reranking\n config: default\n split: dev\n revision: None\n metrics:\n - type: map\n value: 35.4600511272538\n verified: false\n - type: mrr\n value: 34.60238095238095\n verified: false\n - task:\n type: Reranking\n dataset:\n name: MTEB T2Reranking\n type: C-MTEB/T2Reranking\n config: default\n split: dev\n revision: None\n metrics:\n - type: map\n value: 67.27728847727172\n verified: false\n - type: mrr\n value: 77.1315192743764\n verified: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": 
""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""650015a612c1442d9930ad41"", ""modelId"": ""BAAI/bge-reranker-large"", ""usedStorage"": 7321697580}",0,"https://huggingface.co/OpenSciLM/OpenScholar_Reranker, https://huggingface.co/truthsystems/legal-bge-reranker-large",2,,0,"https://huggingface.co/Xenova/bge-reranker-large, https://huggingface.co/DrRos/bge-reranker-large-Q4_K_M-GGUF, https://huggingface.co/kongfly/bge-reranker-large-Q8_0-GGUF, https://huggingface.co/Astralyra/bge-reranker-large-Q8_0-GGUF, https://huggingface.co/badger212/bge-reranker-large-Q8_0-GGUF, https://huggingface.co/felixzyx/bge-reranker-large-Q8_0-GGUF, https://huggingface.co/hedao/bge-reranker-large-Q4_K_M-GGUF, https://huggingface.co/Jack168/bge-reranker-large-Q8_0-GGUF, https://huggingface.co/pyarn/bge-reranker-large-Q8_0-GGUF",9,,0,"CjangCjengh/Prompt-Compression-Toolbox, LISA-Kadi/LISA-demo, SmileXing/leaderboard, Thun09/leaderboard_demo, Zulelee/langchain-chatchat, huggingface/InferenceSupport/discussions/new?title=BAAI/bge-reranker-large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBAAI%2Fbge-reranker-large%5D(%2FBAAI%2Fbge-reranker-large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, lightmate/llm-chatbot, mteb/leaderboard, mteb/leaderboard_legacy, q275343119/leaderboard, sambanovasystems/enterprise_knowledge_retriever, sq66/leaderboard_legacy, zhuhai111/Toursim-Test",13
|
| 529 |
+
OpenSciLM/OpenScholar_Reranker,"---
|
| 530 |
+
license: mit
|
| 531 |
+
language:
|
| 532 |
+
- en
|
| 533 |
+
base_model:
|
| 534 |
+
- BAAI/bge-reranker-large
|
| 535 |
+
---
|
| 536 |
+
|
| 537 |
+
OpenScholar_Reranker is a fine-tuned version of [bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) for scientific literature synthesis.
|
| 538 |
+
|
| 539 |
+
### Model Description
|
| 540 |
+
|
| 541 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 542 |
+
|
| 543 |
+
- **Developed by:** University of Washigton, Allen Institute for AI (AI2)
|
| 544 |
+
- **Model type:** a masked language model.
|
| 545 |
+
- **Language(s) (NLP):** English
|
| 546 |
+
- **License:** The code and model are released under apache-2.0.
|
| 547 |
+
- **Date cutoff:** The fine-tuning data is generated by Llama 3 70B for synthetically generated queries.
|
| 548 |
+
|
| 549 |
+
### Model Sources
|
| 550 |
+
|
| 551 |
+
<!-- Provide the basic links for the model. -->
|
| 552 |
+
|
| 553 |
+
- **Project Page:** https://open-scholar.allen.ai/
|
| 554 |
+
- **Repositories:**
|
| 555 |
+
- Core repo (training, inference, fine-tuning etc.): https://github.com/AkariAsai/OpenScholar
|
| 556 |
+
- Evaluation code: https://github.com/AkariAsai/ScholarQABench
|
| 557 |
+
- **Paper:** [Link](https://openscholar.allen.ai/paper)
|
| 558 |
+
- **Technical blog post:** https://allenai.org/blog/openscholar
|
| 559 |
+
<!-- - **Press release:** TODO -->
|
| 560 |
+
|
| 561 |
+
### Citation
|
| 562 |
+
If you find it useful in this work, cite our paper.
|
| 563 |
+
|
| 564 |
+
```
|
| 565 |
+
@article{openscholar,
|
| 566 |
+
title={{OpenScholar}: Synthesizing Scientific Literature with Retrieval-Augmented Language Models},
|
| 567 |
+
author={ Asai, Akari and He*, Jacqueline and Shao*, Rulin and Shi, Weijia and Singh, Amanpreet and Chang, Joseph Chee and Lo, Kyle and Soldaini, Luca and Feldman, Tian, Sergey and Mike, D’arcy and Wadden, David and Latzke, Matt and Minyang and Ji, Pan and Liu, Shengyan and Tong, Hao and Wu, Bohao and Xiong, Yanyu and Zettlemoyer, Luke and Weld, Dan and Neubig, Graham and Downey, Doug and Yih, Wen-tau and Koh, Pang Wei and Hajishirzi, Hannaneh},
|
| 568 |
+
journal={Arxiv},
|
| 569 |
+
year={2024},
|
| 570 |
+
}
|
| 571 |
+
```
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
","{""id"": ""OpenSciLM/OpenScholar_Reranker"", ""author"": ""OpenSciLM"", ""sha"": ""f371d9d9a5c47d5b89dfa130d8d6fa303f27126e"", ""last_modified"": ""2024-11-19 11:24:33+00:00"", ""created_at"": ""2024-11-15 15:35:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7461, ""downloads_all_time"": null, ""likes"": 7, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""xlm-roberta"", ""en"", ""base_model:BAAI/bge-reranker-large"", ""base_model:finetune:BAAI/bge-reranker-large"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model:\n- BAAI/bge-reranker-large\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 559891457}, ""total"": 559891457}, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2024-11-19 11:24:33+00:00"", ""cardData"": ""base_model:\n- BAAI/bge-reranker-large\nlanguage:\n- en\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""67376a493f07d166d29ed1dd"", ""modelId"": ""OpenSciLM/OpenScholar_Reranker"", ""usedStorage"": 2244688735}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=OpenSciLM/OpenScholar_Reranker&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BOpenSciLM%2FOpenScholar_Reranker%5D(%2FOpenSciLM%2FOpenScholar_Reranker)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 575 |
+
truthsystems/legal-bge-reranker-large,"---
|
| 576 |
+
license: mit
|
| 577 |
+
language:
|
| 578 |
+
- en
|
| 579 |
+
base_model: BAAI/bge-reranker-large
|
| 580 |
+
---","{""id"": ""truthsystems/legal-bge-reranker-large"", ""author"": ""truthsystems"", ""sha"": ""f0fff5002053f84e670d0c3dca9f625418340a37"", ""last_modified"": ""2024-09-05 10:56:15+00:00"", ""created_at"": ""2024-09-05 10:26:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 75, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""xlm-roberta"", ""en"", ""base_model:BAAI/bge-reranker-large"", ""base_model:finetune:BAAI/bge-reranker-large"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: BAAI/bge-reranker-large\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 559891457}, ""total"": 559891457}, ""security_repo_status"": null, 
""xet_enabled"": null, ""lastModified"": ""2024-09-05 10:56:15+00:00"", ""cardData"": ""base_model: BAAI/bge-reranker-large\nlanguage:\n- en\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""66d9877087fc590d9c7b471e"", ""modelId"": ""truthsystems/legal-bge-reranker-large"", ""usedStorage"": 2244688799}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=truthsystems/legal-bge-reranker-large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btruthsystems%2Flegal-bge-reranker-large%5D(%2Ftruthsystems%2Flegal-bge-reranker-large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
bge-reranker-v2-m3_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,800 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
BAAI/bge-reranker-v2-m3,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
pipeline_tag: text-classification
|
| 5 |
+
tags:
|
| 6 |
+
- transformers
|
| 7 |
+
- sentence-transformers
|
| 8 |
+
- text-embeddings-inference
|
| 9 |
+
language:
|
| 10 |
+
- multilingual
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# Reranker
|
| 14 |
+
|
| 15 |
+
**More details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/tree/master).**
|
| 16 |
+
|
| 17 |
+
- [Model List](#model-list)
|
| 18 |
+
- [Usage](#usage)
|
| 19 |
+
- [Fine-tuning](#fine-tune)
|
| 20 |
+
- [Evaluation](#evaluation)
|
| 21 |
+
- [Citation](#citation)
|
| 22 |
+
|
| 23 |
+
Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding.
|
| 24 |
+
You can get a relevance score by inputting query and passage to the reranker.
|
| 25 |
+
And the score can be mapped to a float value in [0,1] by sigmoid function.
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
## Model List
|
| 29 |
+
|
| 30 |
+
| Model | Base model | Language | layerwise | feature |
|
| 31 |
+
|:--------------------------------------------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------:|
|
| 32 |
+
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |
|
| 33 |
+
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | [xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |
|
| 34 |
+
| [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) | [bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | - | Lightweight reranker model, possesses strong multilingual capabilities, easy to deploy, with fast inference. |
|
| 35 |
+
| [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma) | [gemma-2b](https://huggingface.co/google/gemma-2b) | Multilingual | - | Suitable for multilingual contexts, performs well in both English proficiency and multilingual capabilities. |
|
| 36 |
+
| [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) | [MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) | Multilingual | 8-40 | Suitable for multilingual contexts, performs well in both English and Chinese proficiency, allows freedom to select layers for output, facilitating accelerated inference. |
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
You can select the model according your senario and resource.
|
| 40 |
+
- For **multilingual**, utilize [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) and [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma)
|
| 41 |
+
|
| 42 |
+
- For **Chinese or English**, utilize [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) and [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).
|
| 43 |
+
|
| 44 |
+
- For **efficiency**, utilize [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) and the low layer of [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).
|
| 45 |
+
|
| 46 |
+
- For better performance, recommand [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) and [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma)
|
| 47 |
+
|
| 48 |
+
## Usage
|
| 49 |
+
### Using FlagEmbedding
|
| 50 |
+
|
| 51 |
+
```
|
| 52 |
+
pip install -U FlagEmbedding
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3 )
|
| 56 |
+
|
| 57 |
+
Get relevance scores (higher scores indicate more relevance):
|
| 58 |
+
|
| 59 |
+
```python
|
| 60 |
+
from FlagEmbedding import FlagReranker
|
| 61 |
+
reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
| 62 |
+
|
| 63 |
+
score = reranker.compute_score(['query', 'passage'])
|
| 64 |
+
print(score) # -5.65234375
|
| 65 |
+
|
| 66 |
+
# You can map the scores into 0-1 by set ""normalize=True"", which will apply sigmoid function to the score
|
| 67 |
+
score = reranker.compute_score(['query', 'passage'], normalize=True)
|
| 68 |
+
print(score) # 0.003497010252573502
|
| 69 |
+
|
| 70 |
+
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
|
| 71 |
+
print(scores) # [-8.1875, 5.26171875]
|
| 72 |
+
|
| 73 |
+
# You can map the scores into 0-1 by set ""normalize=True"", which will apply sigmoid function to the score
|
| 74 |
+
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], normalize=True)
|
| 75 |
+
print(scores) # [0.00027803096387751553, 0.9948403768236574]
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
#### For LLM-based reranker
|
| 79 |
+
|
| 80 |
+
```python
|
| 81 |
+
from FlagEmbedding import FlagLLMReranker
|
| 82 |
+
reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
| 83 |
+
# reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
|
| 84 |
+
|
| 85 |
+
score = reranker.compute_score(['query', 'passage'])
|
| 86 |
+
print(score)
|
| 87 |
+
|
| 88 |
+
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
|
| 89 |
+
print(scores)
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
#### For LLM-based layerwise reranker
|
| 93 |
+
|
| 94 |
+
```python
|
| 95 |
+
from FlagEmbedding import LayerWiseFlagLLMReranker
|
| 96 |
+
reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
| 97 |
+
# reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
|
| 98 |
+
|
| 99 |
+
score = reranker.compute_score(['query', 'passage'], cutoff_layers=[28]) # Adjusting 'cutoff_layers' to pick which layers are used for computing the score.
|
| 100 |
+
print(score)
|
| 101 |
+
|
| 102 |
+
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], cutoff_layers=[28])
|
| 103 |
+
print(scores)
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
### Using Huggingface transformers
|
| 107 |
+
|
| 108 |
+
#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3 )
|
| 109 |
+
|
| 110 |
+
Get relevance scores (higher scores indicate more relevance):
|
| 111 |
+
|
| 112 |
+
```python
|
| 113 |
+
import torch
|
| 114 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 115 |
+
|
| 116 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-m3')
|
| 117 |
+
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-v2-m3')
|
| 118 |
+
model.eval()
|
| 119 |
+
|
| 120 |
+
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
|
| 121 |
+
with torch.no_grad():
|
| 122 |
+
inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
|
| 123 |
+
scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
|
| 124 |
+
print(scores)
|
| 125 |
+
```
|
| 126 |
+
|
| 127 |
+
#### For LLM-based reranker
|
| 128 |
+
|
| 129 |
+
```python
|
| 130 |
+
import torch
|
| 131 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 132 |
+
|
| 133 |
+
def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
|
| 134 |
+
if prompt is None:
|
| 135 |
+
prompt = ""Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'.""
|
| 136 |
+
sep = ""\n""
|
| 137 |
+
prompt_inputs = tokenizer(prompt,
|
| 138 |
+
return_tensors=None,
|
| 139 |
+
add_special_tokens=False)['input_ids']
|
| 140 |
+
sep_inputs = tokenizer(sep,
|
| 141 |
+
return_tensors=None,
|
| 142 |
+
add_special_tokens=False)['input_ids']
|
| 143 |
+
inputs = []
|
| 144 |
+
for query, passage in pairs:
|
| 145 |
+
query_inputs = tokenizer(f'A: {query}',
|
| 146 |
+
return_tensors=None,
|
| 147 |
+
add_special_tokens=False,
|
| 148 |
+
max_length=max_length * 3 // 4,
|
| 149 |
+
truncation=True)
|
| 150 |
+
passage_inputs = tokenizer(f'B: {passage}',
|
| 151 |
+
return_tensors=None,
|
| 152 |
+
add_special_tokens=False,
|
| 153 |
+
max_length=max_length,
|
| 154 |
+
truncation=True)
|
| 155 |
+
item = tokenizer.prepare_for_model(
|
| 156 |
+
[tokenizer.bos_token_id] + query_inputs['input_ids'],
|
| 157 |
+
sep_inputs + passage_inputs['input_ids'],
|
| 158 |
+
truncation='only_second',
|
| 159 |
+
max_length=max_length,
|
| 160 |
+
padding=False,
|
| 161 |
+
return_attention_mask=False,
|
| 162 |
+
return_token_type_ids=False,
|
| 163 |
+
add_special_tokens=False
|
| 164 |
+
)
|
| 165 |
+
item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
|
| 166 |
+
item['attention_mask'] = [1] * len(item['input_ids'])
|
| 167 |
+
inputs.append(item)
|
| 168 |
+
return tokenizer.pad(
|
| 169 |
+
inputs,
|
| 170 |
+
padding=True,
|
| 171 |
+
max_length=max_length + len(sep_inputs) + len(prompt_inputs),
|
| 172 |
+
pad_to_multiple_of=8,
|
| 173 |
+
return_tensors='pt',
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-gemma')
|
| 177 |
+
model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-gemma')
|
| 178 |
+
yes_loc = tokenizer('Yes', add_special_tokens=False)['input_ids'][0]
|
| 179 |
+
model.eval()
|
| 180 |
+
|
| 181 |
+
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
|
| 182 |
+
with torch.no_grad():
|
| 183 |
+
inputs = get_inputs(pairs, tokenizer)
|
| 184 |
+
scores = model(**inputs, return_dict=True).logits[:, -1, yes_loc].view(-1, ).float()
|
| 185 |
+
print(scores)
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
#### For LLM-based layerwise reranker
|
| 189 |
+
|
| 190 |
+
```python
|
| 191 |
+
import torch
|
| 192 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 193 |
+
|
| 194 |
+
def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
|
| 195 |
+
if prompt is None:
|
| 196 |
+
prompt = ""Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'.""
|
| 197 |
+
sep = ""\n""
|
| 198 |
+
prompt_inputs = tokenizer(prompt,
|
| 199 |
+
return_tensors=None,
|
| 200 |
+
add_special_tokens=False)['input_ids']
|
| 201 |
+
sep_inputs = tokenizer(sep,
|
| 202 |
+
return_tensors=None,
|
| 203 |
+
add_special_tokens=False)['input_ids']
|
| 204 |
+
inputs = []
|
| 205 |
+
for query, passage in pairs:
|
| 206 |
+
query_inputs = tokenizer(f'A: {query}',
|
| 207 |
+
return_tensors=None,
|
| 208 |
+
add_special_tokens=False,
|
| 209 |
+
max_length=max_length * 3 // 4,
|
| 210 |
+
truncation=True)
|
| 211 |
+
passage_inputs = tokenizer(f'B: {passage}',
|
| 212 |
+
return_tensors=None,
|
| 213 |
+
add_special_tokens=False,
|
| 214 |
+
max_length=max_length,
|
| 215 |
+
truncation=True)
|
| 216 |
+
item = tokenizer.prepare_for_model(
|
| 217 |
+
[tokenizer.bos_token_id] + query_inputs['input_ids'],
|
| 218 |
+
sep_inputs + passage_inputs['input_ids'],
|
| 219 |
+
truncation='only_second',
|
| 220 |
+
max_length=max_length,
|
| 221 |
+
padding=False,
|
| 222 |
+
return_attention_mask=False,
|
| 223 |
+
return_token_type_ids=False,
|
| 224 |
+
add_special_tokens=False
|
| 225 |
+
)
|
| 226 |
+
item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
|
| 227 |
+
item['attention_mask'] = [1] * len(item['input_ids'])
|
| 228 |
+
inputs.append(item)
|
| 229 |
+
return tokenizer.pad(
|
| 230 |
+
inputs,
|
| 231 |
+
padding=True,
|
| 232 |
+
max_length=max_length + len(sep_inputs) + len(prompt_inputs),
|
| 233 |
+
pad_to_multiple_of=8,
|
| 234 |
+
return_tensors='pt',
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)
|
| 238 |
+
model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
|
| 239 |
+
model = model.to('cuda')
|
| 240 |
+
model.eval()
|
| 241 |
+
|
| 242 |
+
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
|
| 243 |
+
with torch.no_grad():
|
| 244 |
+
inputs = get_inputs(pairs, tokenizer).to(model.device)
|
| 245 |
+
all_scores = model(**inputs, return_dict=True, cutoff_layers=[28])
|
| 246 |
+
all_scores = [scores[:, -1].view(-1, ).float() for scores in all_scores[0]]
|
| 247 |
+
print(all_scores)
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
## Fine-tune
|
| 251 |
+
|
| 252 |
+
### Data Format
|
| 253 |
+
|
| 254 |
+
Train data should be a json file, where each line is a dict like this:
|
| 255 |
+
|
| 256 |
+
```
|
| 257 |
+
{""query"": str, ""pos"": List[str], ""neg"":List[str], ""prompt"": str}
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
`query` is the query, and `pos` is a list of positive texts, `neg` is a list of negative texts, `prompt` indicates the relationship between query and texts. If you have no negative texts for a query, you can random sample some from the entire corpus as the negatives.
|
| 261 |
+
|
| 262 |
+
See [toy_finetune_data.jsonl](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker/toy_finetune_data.jsonl) for a toy data file.
|
| 263 |
+
|
| 264 |
+
### Train
|
| 265 |
+
|
| 266 |
+
You can fine-tune the reranker with the following code:
|
| 267 |
+
|
| 268 |
+
**For llm-based reranker**
|
| 269 |
+
|
| 270 |
+
```shell
|
| 271 |
+
torchrun --nproc_per_node {number of gpus} \
|
| 272 |
+
-m FlagEmbedding.llm_reranker.finetune_for_instruction.run \
|
| 273 |
+
--output_dir {path to save model} \
|
| 274 |
+
--model_name_or_path google/gemma-2b \
|
| 275 |
+
--train_data ./toy_finetune_data.jsonl \
|
| 276 |
+
--learning_rate 2e-4 \
|
| 277 |
+
--num_train_epochs 1 \
|
| 278 |
+
--per_device_train_batch_size 1 \
|
| 279 |
+
--gradient_accumulation_steps 16 \
|
| 280 |
+
--dataloader_drop_last True \
|
| 281 |
+
--query_max_len 512 \
|
| 282 |
+
--passage_max_len 512 \
|
| 283 |
+
--train_group_size 16 \
|
| 284 |
+
--logging_steps 1 \
|
| 285 |
+
--save_steps 2000 \
|
| 286 |
+
--save_total_limit 50 \
|
| 287 |
+
--ddp_find_unused_parameters False \
|
| 288 |
+
--gradient_checkpointing \
|
| 289 |
+
--deepspeed stage1.json \
|
| 290 |
+
--warmup_ratio 0.1 \
|
| 291 |
+
--bf16 \
|
| 292 |
+
--use_lora True \
|
| 293 |
+
--lora_rank 32 \
|
| 294 |
+
--lora_alpha 64 \
|
| 295 |
+
--use_flash_attn True \
|
| 296 |
+
--target_modules q_proj k_proj v_proj o_proj
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
**For llm-based layerwise reranker**
|
| 300 |
+
|
| 301 |
+
```shell
|
| 302 |
+
torchrun --nproc_per_node {number of gpus} \
|
| 303 |
+
-m FlagEmbedding.llm_reranker.finetune_for_layerwise.run \
|
| 304 |
+
--output_dir {path to save model} \
|
| 305 |
+
--model_name_or_path openbmb/MiniCPM-2B-dpo-bf16 \
|
| 306 |
+
--train_data ./toy_finetune_data.jsonl \
|
| 307 |
+
--learning_rate 2e-4 \
|
| 308 |
+
--num_train_epochs 1 \
|
| 309 |
+
--per_device_train_batch_size 1 \
|
| 310 |
+
--gradient_accumulation_steps 16 \
|
| 311 |
+
--dataloader_drop_last True \
|
| 312 |
+
--query_max_len 512 \
|
| 313 |
+
--passage_max_len 512 \
|
| 314 |
+
--train_group_size 16 \
|
| 315 |
+
--logging_steps 1 \
|
| 316 |
+
--save_steps 2000 \
|
| 317 |
+
--save_total_limit 50 \
|
| 318 |
+
--ddp_find_unused_parameters False \
|
| 319 |
+
--gradient_checkpointing \
|
| 320 |
+
--deepspeed stage1.json \
|
| 321 |
+
--warmup_ratio 0.1 \
|
| 322 |
+
--bf16 \
|
| 323 |
+
--use_lora True \
|
| 324 |
+
--lora_rank 32 \
|
| 325 |
+
--lora_alpha 64 \
|
| 326 |
+
--use_flash_attn True \
|
| 327 |
+
--target_modules q_proj k_proj v_proj o_proj \
|
| 328 |
+
--start_layer 8 \
|
| 329 |
+
--head_multi True \
|
| 330 |
+
--head_type simple \
|
| 331 |
+
--lora_extra_parameters linear_head
|
| 332 |
+
```
|
| 333 |
+
|
| 334 |
+
Our rerankers are initialized from [google/gemma-2b](https://huggingface.co/google/gemma-2b) (for llm-based reranker) and [openbmb/MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) (for llm-based layerwise reranker), and we train it on a mixture of multilingual datasets:
|
| 335 |
+
|
| 336 |
+
- [bge-m3-data](https://huggingface.co/datasets/Shitao/bge-m3-data)
|
| 337 |
+
- [quora train data](https://huggingface.co/datasets/quora)
|
| 338 |
+
- [fever train data](https://fever.ai/dataset/fever.html)
|
| 339 |
+
|
| 340 |
+
## Evaluation
|
| 341 |
+
|
| 342 |
+
- llama-index.
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
- BEIR.
|
| 348 |
+
|
| 349 |
+
rerank the top 100 results from bge-en-v1.5 large.
|
| 350 |
+
|
| 351 |
+

|
| 352 |
+
|
| 353 |
+
rerank the top 100 results from e5 mistral 7b instruct.
|
| 354 |
+
|
| 355 |
+

|
| 356 |
+
|
| 357 |
+
- CMTEB-retrieval.
|
| 358 |
+
It reranks the top 100 results from bge-zh-v1.5 large.
|
| 359 |
+
|
| 360 |
+

|
| 361 |
+
|
| 362 |
+
- miracl (multi-language).
|
| 363 |
+
It reranks the top 100 results from bge-m3.
|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
## Citation
|
| 370 |
+
|
| 371 |
+
If you find this repository useful, please consider giving a star and citation
|
| 372 |
+
|
| 373 |
+
```bibtex
|
| 374 |
+
@misc{li2023making,
|
| 375 |
+
title={Making Large Language Models A Better Foundation For Dense Retrieval},
|
| 376 |
+
author={Chaofan Li and Zheng Liu and Shitao Xiao and Yingxia Shao},
|
| 377 |
+
year={2023},
|
| 378 |
+
eprint={2312.15503},
|
| 379 |
+
archivePrefix={arXiv},
|
| 380 |
+
primaryClass={cs.CL}
|
| 381 |
+
}
|
| 382 |
+
@misc{chen2024bge,
|
| 383 |
+
title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
|
| 384 |
+
author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
|
| 385 |
+
year={2024},
|
| 386 |
+
eprint={2402.03216},
|
| 387 |
+
archivePrefix={arXiv},
|
| 388 |
+
primaryClass={cs.CL}
|
| 389 |
+
}
|
| 390 |
+
```","{""id"": ""BAAI/bge-reranker-v2-m3"", ""author"": ""BAAI"", ""sha"": ""953dc6f6f85a1b2dbfca4c34a2796e7dde08d41e"", ""last_modified"": ""2024-06-24 14:08:45+00:00"", ""created_at"": ""2024-03-15 13:32:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1783584, ""downloads_all_time"": null, ""likes"": 620, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""safetensors"", ""xlm-roberta"", ""text-classification"", ""transformers"", ""text-embeddings-inference"", ""multilingual"", ""arxiv:2312.15503"", ""arxiv:2402.03216"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/BEIR-bge-en-v1.5.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/BEIR-e5-mistral.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/CMTEB-retrieval-bge-zh-v1.5.png', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='assets/llama-index.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/miracl-bge-m3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""lightmate/llm-chatbot"", ""TheJimmy/ai-builder-bookIdentifier-HF"", ""aihuashanying/aileeao"", ""ziyingsk/BW_RAG"", ""DanHX/KHome"", ""LukaBondi/osuosutesttest"", ""vadjs/rag-homework"", ""SebastianSchramm/qa-api"", ""lintasmediadanawa/hf-llm-api"", ""zxsipola123456/ragflow"", ""ldd12/BAAI-bge-reranker-v2-m3"", ""jeongsk/WDS-QA-Bot"", ""JiakaiDu/RAG_Test"", ""EbeshaAI/dummy-license-plate"", ""EbeshaAI/dummy-license-plate-2"", ""EbeshaAI/dummy-license-plate-api"", ""EbeshaAI/dummy-license-plate-api-2"", ""retopara/ragflow"", ""fikriazain/Hemodialysis-Bot"", ""Adhin/HemoVDB"", ""aihuashanying/aileeao_test"", ""Shriharshan/Autism-RAG"", ""samlax12/agent"", ""Starowo/ragflow"", ""Korawan/domae02""], ""safetensors"": {""parameters"": {""F32"": 567755777}, ""total"": 567755777}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-24 14:08:45+00:00"", ""cardData"": ""language:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65f44de295b7d70871c368eb"", ""modelId"": 
""BAAI/bge-reranker-v2-m3"", ""usedStorage"": 2293239176}",0,"https://huggingface.co/dragonkue/bge-reranker-v2-m3-ko, https://huggingface.co/wl-tookitaki/bge_reranker, https://huggingface.co/qilowoq/bge-reranker-v2-m3-en-ru, https://huggingface.co/sridhariyer/bge-reranker-v2-m3-openvino, https://huggingface.co/NumberEight/bge-reranker-v2-m3-openvino",5,,0,"https://huggingface.co/Felladrin/gguf-Q8_0-bge-reranker-v2-m3, https://huggingface.co/puppyM/bge-reranker-v2-m3-Q4_K_M-GGUF, https://huggingface.co/fanyixiong/bge-reranker-v2-m3-Q8_0-GGUF, https://huggingface.co/klnstpr/bge-reranker-v2-m3-Q8_0-GGUF, https://huggingface.co/sikreutz/bge-reranker-v2-m3-Q4_K_M-GGUF, https://huggingface.co/pqnet/bge-reranker-v2-m3-Q8_0-GGUF, https://huggingface.co/Astralyra/bge-reranker-v2-m3-Q8_0-GGUF, https://huggingface.co/lj027/bge-reranker-v2-m3-Q8_0-GGUF, https://huggingface.co/sehiro/bge-reranker-v2-m3-Q4_K_M-GGUF, https://huggingface.co/sabafallah/bge-reranker-v2-m3-Q4_K_M-GGUF, https://huggingface.co/pyarn/bge-reranker-v2-m3-Q5_K_M-GGUF, https://huggingface.co/fish22/bge-reranker-v2-m3-Q4_K_M-GGUF",12,,0,"DanHX/KHome, LukaBondi/osuosutesttest, SebastianSchramm/qa-api, Shriharshan/Autism-RAG, TheJimmy/ai-builder-bookIdentifier-HF, aihuashanying/aileeao, huggingface/InferenceSupport/discussions/new?title=BAAI/bge-reranker-v2-m3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBAAI%2Fbge-reranker-v2-m3%5D(%2FBAAI%2Fbge-reranker-v2-m3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, lightmate/llm-chatbot, lintasmediadanawa/hf-llm-api, samlax12/agent, vadjs/rag-homework, ziyingsk/BW_RAG, zxsipola123456/ragflow",13
|
| 391 |
+
dragonkue/bge-reranker-v2-m3-ko,"---
|
| 392 |
+
license: apache-2.0
|
| 393 |
+
language:
|
| 394 |
+
- ko
|
| 395 |
+
- en
|
| 396 |
+
metrics:
|
| 397 |
+
- accuracy
|
| 398 |
+
base_model:
|
| 399 |
+
- BAAI/bge-reranker-v2-m3
|
| 400 |
+
pipeline_tag: text-ranking
|
| 401 |
+
library_name: sentence-transformers
|
| 402 |
+
---
|
| 403 |
+
|
| 404 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/642b0c2fecec03b4464a1d9b/IxcqY5qbGNuGpqDciIcOI.webp"" width=""600"">
|
| 405 |
+
|
| 406 |
+
# Reranker (Cross-Encoder)
|
| 407 |
+
|
| 408 |
+
Different from an embedding model, a reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. You can get a relevance score by inputting a query and a passage to the reranker, and the score can be mapped to a float value in [0,1] by the sigmoid function.
|
| 409 |
+
|
| 410 |
+
## Model Details
|
| 411 |
+
- Base model : BAAI/bge-reranker-v2-m3
|
| 412 |
+
- The multilingual model has been optimized for Korean.
|
| 413 |
+
|
| 414 |
+
## Usage with Transformers
|
| 415 |
+
|
| 416 |
+
```python
|
| 417 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 418 |
+
import torch
|
| 419 |
+
|
| 420 |
+
model = AutoModelForSequenceClassification.from_pretrained('dragonkue/bge-reranker-v2-m3-ko')
|
| 421 |
+
tokenizer = AutoTokenizer.from_pretrained('dragonkue/bge-reranker-v2-m3-ko')
|
| 422 |
+
|
| 423 |
+
features = tokenizer([['몇 년도에 지방세외수입법이 시행됐을까?', '실무교육을 통해 ‘지방세외수입법’에 대한 자치단체의 관심을 제고하고 자치단체의 차질 없는 업무 추진을 지원하였다. 이러한 준비과정을 거쳐 2014년 8월 7일부터 ‘지방세외수입법’이 시행되었다.'],
|
| 424 |
+
['몇 년도에 지방세외수입법이 시행됐을까?', '식품의약품안전처는 21일 국내 제약기업 유바이오로직스가 개발 중인 신종 코로나바이러스 감염증(코로나19) 백신 후보물질 ‘유코백-19’의 임상시험 계획을 지난 20일 승인했다고 밝혔다.']], padding=True, truncation=True, return_tensors=""pt"")
|
| 425 |
+
|
| 426 |
+
model.eval()
|
| 427 |
+
with torch.no_grad():
|
| 428 |
+
logits = model(**features).logits
|
| 429 |
+
scores = torch.sigmoid(logits)
|
| 430 |
+
print(scores)
|
| 431 |
+
# [9.9997962e-01 5.0702977e-07]
|
| 432 |
+
```
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
## Usage with SentenceTransformers
|
| 436 |
+
First install the Sentence Transformers library:
|
| 437 |
+
```
|
| 438 |
+
pip install -U sentence-transformers
|
| 439 |
+
```
|
| 440 |
+
|
| 441 |
+
```python
|
| 442 |
+
from sentence_transformers import CrossEncoder
|
| 443 |
+
|
| 444 |
+
model = CrossEncoder('dragonkue/bge-reranker-v2-m3-ko', default_activation_function=torch.nn.Sigmoid())
|
| 445 |
+
|
| 446 |
+
scores = model.predict([['몇 년도에 지방세외수입법이 시행됐을까?', '실무교육을 통해 ‘지방세외수입법’에 대한 자치단체의 관심을 제고하고 자치단체의 차질 없는 업무 추진을 지원하였다. 이러한 준비과정을 거쳐 2014년 8월 7일부터 ‘지방세외수입법’이 시행되었다.'],
|
| 447 |
+
['몇 년도에 지방세외수입법이 시행됐을까?', '식품의약품안전처는 21일 국내 제약기업 유바이오로직스가 개발 중인 신종 코로나바이러스 감염증(코로나19) 백신 후보물질 ‘유코백-19’의 임상시험 계획을 지난 20일 승인했다고 밝혔다.']])
|
| 448 |
+
print(scores)
|
| 449 |
+
# [9.9997962e-01 5.0702977e-07]
|
| 450 |
+
```
|
| 451 |
+
|
| 452 |
+
## Usage with FlagEmbedding
|
| 453 |
+
First install the FlagEmbedding library:
|
| 454 |
+
```
|
| 455 |
+
pip install -U FlagEmbedding
|
| 456 |
+
```
|
| 457 |
+
```python
|
| 458 |
+
from FlagEmbedding import FlagReranker
|
| 459 |
+
|
| 460 |
+
reranker = FlagReranker('dragonkue/bge-reranker-v2-m3-ko')
|
| 461 |
+
|
| 462 |
+
scores = reranker.compute_score([['몇 년도에 지방세외수입법이 시행됐을까?', '실무교육을 통해 ‘지방세외수입법’에 대한 자치단체의 관심을 제고하고 자치단체의 차질 없는 업무 추진을 지원하였다. 이러한 준비과정을 거쳐 2014년 8월 7일부터 ‘지방세외수입법’이 시행되었다.'],
|
| 463 |
+
['몇 년도에 지방세외수입법이 시행됐을까?', '식품의약품안전처는 21일 국내 제약기업 유바이오로직스가 개발 중인 신종 코로나바이러스 감염증(코로나19) 백신 후보물질 ‘유코백-19’의 임상시험 계획을 지난 20일 승인했다고 밝혔다.']], normalize=True)
|
| 464 |
+
print(scores)
|
| 465 |
+
# [9.9997962e-01 5.0702977e-07]
|
| 466 |
+
```
|
| 467 |
+
|
| 468 |
+
## Fine-tune
|
| 469 |
+
Refer to https://github.com/FlagOpen/FlagEmbedding
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
## Evaluation
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
### Bi-encoder and Cross-encoder
|
| 476 |
+
|
| 477 |
+
Bi-Encoders convert texts into fixed-size vectors and efficiently calculate similarities between them. They are fast and ideal for tasks like semantic search and classification, making them suitable for processing large datasets quickly.
|
| 478 |
+
|
| 479 |
+
Cross-Encoders directly compare pairs of texts to compute similarity scores, providing more accurate results. While they are slower due to needing to process each pair, they excel in re-ranking top results and are important in Advanced RAG techniques for enhancing text generation.
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
### Korean Embedding Benchmark with AutoRAG
|
| 483 |
+
(https://github.com/Marker-Inc-Korea/AutoRAG-example-korean-embedding-benchmark)
|
| 484 |
+
|
| 485 |
+
This is a Korean embedding benchmark for the financial sector.
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
**Top-k 1**
|
| 489 |
+
|
| 490 |
+
Bi-Encoder (Sentence Transformer)
|
| 491 |
+
|
| 492 |
+
| Model name | F1 | Recall | Precision |
|
| 493 |
+
|---------------------------------------|------------|------------|------------|
|
| 494 |
+
| paraphrase-multilingual-mpnet-base-v2 | 0.3596 | 0.3596 | 0.3596 |
|
| 495 |
+
| KoSimCSE-roberta | 0.4298 | 0.4298 | 0.4298 |
|
| 496 |
+
| Cohere embed-multilingual-v3.0 | 0.3596 | 0.3596 | 0.3596 |
|
| 497 |
+
| openai ada 002 | 0.4737 | 0.4737 | 0.4737 |
|
| 498 |
+
| multilingual-e5-large-instruct | 0.4649 | 0.4649 | 0.4649 |
|
| 499 |
+
| Upstage Embedding | 0.6579 | 0.6579 | 0.6579 |
|
| 500 |
+
| paraphrase-multilingual-MiniLM-L12-v2 | 0.2982 | 0.2982 | 0.2982 |
|
| 501 |
+
| openai_embed_3_small | 0.5439 | 0.5439 | 0.5439 |
|
| 502 |
+
| ko-sroberta-multitask | 0.4211 | 0.4211 | 0.4211 |
|
| 503 |
+
| openai_embed_3_large | 0.6053 | 0.6053 | 0.6053 |
|
| 504 |
+
| KU-HIAI-ONTHEIT-large-v1 | 0.7105 | 0.7105 | 0.7105 |
|
| 505 |
+
| KU-HIAI-ONTHEIT-large-v1.1 | 0.7193 | 0.7193 | 0.7193 |
|
| 506 |
+
| kf-deberta-multitask | 0.4561 | 0.4561 | 0.4561 |
|
| 507 |
+
| gte-multilingual-base | 0.5877 | 0.5877 | 0.5877 |
|
| 508 |
+
| KoE5 | 0.7018 | 0.7018 | 0.7018 |
|
| 509 |
+
| BGE-m3 | 0.6578 | 0.6578 | 0.6578 |
|
| 510 |
+
| bge-m3-korean | 0.5351 | 0.5351 | 0.5351 |
|
| 511 |
+
| **BGE-m3-ko** | **0.7456** | **0.7456** | **0.7456** |
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
Cross-Encoder (Reranker)
|
| 515 |
+
|
| 516 |
+
| Model name | F1 | Recall | Precision |
|
| 517 |
+
|---------------------------------------|------------|------------|------------|
|
| 518 |
+
| gte-multilingual-reranker-base | 0.7281 | 0.7281 | 0.7281 |
|
| 519 |
+
| jina-reranker-v2-base-multilingual | 0.8070 | 0.8070 | 0.8070 |
|
| 520 |
+
| bge-reranker-v2-m3 | 0.8772 | 0.8772 | 0.8772 |
|
| 521 |
+
| upskyy/ko-reranker-8k | 0.8684| 0.8684 | 0.8684 |
|
| 522 |
+
| upskyy/ko-reranker | 0.8333| 0.8333 | 0.8333 |
|
| 523 |
+
| mncai/bge-ko-reranker-560M | 0.0088| 0.0088 | 0.0088 |
|
| 524 |
+
| Dongjin-kr/ko-reranker | 0.8509| 0.8509 | 0.8509 |
|
| 525 |
+
| **bge-reranker-v2-m3-ko** | **0.9123** | **0.9123** | **0.9123** |
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
**Top-k 3**
|
| 529 |
+
|
| 530 |
+
Bi-Encoder (Sentence Transformer)
|
| 531 |
+
|
| 532 |
+
| Model name | F1 | Recall | Precision |
|
| 533 |
+
|---------------------------------------|------------|------------|------------|
|
| 534 |
+
| paraphrase-multilingual-mpnet-base-v2 | 0.2368 | 0.4737 | 0.1579 |
|
| 535 |
+
| KoSimCSE-roberta | 0.3026 | 0.6053 | 0.2018 |
|
| 536 |
+
| Cohere embed-multilingual-v3.0 | 0.2851 | 0.5702 | 0.1901 |
|
| 537 |
+
| openai ada 002 | 0.3553 | 0.7105 | 0.2368 |
|
| 538 |
+
| multilingual-e5-large-instruct | 0.3333 | 0.6667 | 0.2222 |
|
| 539 |
+
| Upstage Embedding | 0.4211 | 0.8421 | 0.2807 |
|
| 540 |
+
| paraphrase-multilingual-MiniLM-L12-v2 | 0.2061 | 0.4123 | 0.1374 |
|
| 541 |
+
| openai_embed_3_small | 0.3640 | 0.7281 | 0.2427 |
|
| 542 |
+
| ko-sroberta-multitask | 0.2939 | 0.5877 | 0.1959 |
|
| 543 |
+
| openai_embed_3_large | 0.3947 | 0.7895 | 0.2632 |
|
| 544 |
+
| KU-HIAI-ONTHEIT-large-v1 | 0.4386 | 0.8772 | 0.2924 |
|
| 545 |
+
| KU-HIAI-ONTHEIT-large-v1.1 | 0.4430 | 0.8860 | 0.2953 |
|
| 546 |
+
| kf-deberta-multitask | 0.3158 | 0.6316 | 0.2105 |
|
| 547 |
+
| gte-multilingual-base | 0.4035 | 0.8070 | 0.2690 |
|
| 548 |
+
| KoE5 | 0.4254 | 0.8509 | 0.2836 |
|
| 549 |
+
| BGE-m3 | 0.4254 | 0.8508 | 0.2836 |
|
| 550 |
+
| bge-m3-korean | 0.3684 | 0.7368 | 0.2456 |
|
| 551 |
+
| **BGE-m3-ko** | **0.4517** | **0.9035** | **0.3011** |
|
| 552 |
+
|
| 553 |
+
Cross-Encoder (Reranker)
|
| 554 |
+
|
| 555 |
+
| Model name | F1 | Recall | Precision |
|
| 556 |
+
|---------------------------------------|------------|------------|------------|
|
| 557 |
+
| gte-multilingual-reranker-base | 0.4605 | 0.9211 | 0.3070 |
|
| 558 |
+
| jina-reranker-v2-base-multilingual | 0.4649 | 0.9298 | 0.3099 |
|
| 559 |
+
| bge-reranker-v2-m3 | 0.4781 | 0.9561 | 0.3187 |
|
| 560 |
+
| upskyy/ko-reranker-8k | 0.4781| 0.9561 | 0.3187 |
|
| 561 |
+
| upskyy/ko-reranker | 0.4649| 0.9298 | 0.3099 |
|
| 562 |
+
| mncai/bge-ko-reranker-560M | 0.0044| 0.0088 | 0.0029 |
|
| 563 |
+
| Dongjin-kr/ko-reranker | 0.4737| 0.9474 | 0.3158 |
|
| 564 |
+
| **bge-reranker-v2-m3-ko** | **0.4825** | **0.9649** | **0.3216** |
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
","{""id"": ""dragonkue/bge-reranker-v2-m3-ko"", ""author"": ""dragonkue"", ""sha"": ""2aca5884ecac490192af9ebd86836d9073d826cd"", ""last_modified"": ""2025-04-03 06:27:16+00:00"", ""created_at"": ""2024-10-16 13:11:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1156, ""downloads_all_time"": null, ""likes"": 6, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""safetensors"", ""xlm-roberta"", ""text-ranking"", ""ko"", ""en"", ""base_model:BAAI/bge-reranker-v2-m3"", ""base_model:finetune:BAAI/bge-reranker-v2-m3"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-ranking"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model:\n- BAAI/bge-reranker-v2-m3\nlanguage:\n- ko\n- en\nlibrary_name: sentence-transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: text-ranking"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_sentence_transformers.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cross_encoder_eval.ipynb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 567755777}, ""total"": 567755777}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-03 06:27:16+00:00"", ""cardData"": ""base_model:\n- BAAI/bge-reranker-v2-m3\nlanguage:\n- ko\n- en\nlibrary_name: sentence-transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: text-ranking"", ""transformersInfo"": null, ""_id"": ""670fbb9a5928ba70f9317d24"", ""modelId"": ""dragonkue/bge-reranker-v2-m3-ko"", ""usedStorage"": 2293239154}",1,https://huggingface.co/SeoJHeasdw/ktds-vue-code-search-reranker-ko,1,,0,https://huggingface.co/luckycontrol/bge-reranker-v2-m3-ko-Q4_K_M-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=dragonkue/bge-reranker-v2-m3-ko&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdragonkue%2Fbge-reranker-v2-m3-ko%5D(%2Fdragonkue%2Fbge-reranker-v2-m3-ko)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 568 |
+
SeoJHeasdw/ktds-vue-code-search-reranker-ko,"---
|
| 569 |
+
language: ko
|
| 570 |
+
license: mit
|
| 571 |
+
tags:
|
| 572 |
+
- vue
|
| 573 |
+
- code-search
|
| 574 |
+
- cross-encoder
|
| 575 |
+
- korean
|
| 576 |
+
- reranking
|
| 577 |
+
- ktds
|
| 578 |
+
datasets:
|
| 579 |
+
- custom-vue-code-dataset
|
| 580 |
+
base_model: dragonkue/bge-reranker-v2-m3-ko
|
| 581 |
+
---
|
| 582 |
+
|
| 583 |
+
# KT DS Vue Code Search Reranker (Korean)
|
| 584 |
+
|
| 585 |
+
이 모델은 KT DS에서 Vue.js 프로젝트의 코드 검색을 위해 파인튜닝한 Cross-Encoder입니다.
|
| 586 |
+
|
| 587 |
+
## 모델 설명
|
| 588 |
+
- **개발자**: 서제호 (KT DS)
|
| 589 |
+
- **기반 모델**: [dragonkue/bge-reranker-v2-m3-ko](https://huggingface.co/dragonkue/bge-reranker-v2-m3-ko)
|
| 590 |
+
- **용도**: Vue.js 컴포넌트와 관련 코드 검색 결과 재랭킹
|
| 591 |
+
- **학습 데이터**: Vue.js 프로젝트 코드베이스
|
| 592 |
+
- **지원 언어**: 한국어
|
| 593 |
+
|
| 594 |
+
## 사용 방법
|
| 595 |
+
```python
|
| 596 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 597 |
+
|
| 598 |
+
model = AutoModelForSequenceClassification.from_pretrained(""SeoJHeasdw/ktds-vue-code-search-reranker-ko"")
|
| 599 |
+
tokenizer = AutoTokenizer.from_pretrained(""SeoJHeasdw/ktds-vue-code-search-reranker-ko"")
|
| 600 |
+
```
|
| 601 |
+
|
| 602 |
+
## 특징
|
| 603 |
+
- Vue SFC (Single File Component) 구조에 최적화
|
| 604 |
+
- 컴포넌트, 템플릿, 스크립트, 스타일 섹션 이해
|
| 605 |
+
- 한국어 코드 주석 및 변수명 처리 강화
|
| 606 |
+
- Claude로 작성된 Vue 프로젝트 구조에 최적화
|
| 607 |
+
|
| 608 |
+
## 성능
|
| 609 |
+
- Vue 컴포넌트 관련 질의 처리에 특화
|
| 610 |
+
","{""id"": ""SeoJHeasdw/ktds-vue-code-search-reranker-ko"", ""author"": ""SeoJHeasdw"", ""sha"": ""fccfc2ab5bf21f43d689ccdca04debf7bffc7f01"", ""last_modified"": ""2025-04-25 08:12:59+00:00"", ""created_at"": ""2025-04-25 01:20:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""xlm-roberta"", ""vue"", ""code-search"", ""cross-encoder"", ""korean"", ""reranking"", ""ktds"", ""ko"", ""dataset:custom-vue-code-dataset"", ""base_model:dragonkue/bge-reranker-v2-m3-ko"", ""base_model:finetune:dragonkue/bge-reranker-v2-m3-ko"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: dragonkue/bge-reranker-v2-m3-ko\ndatasets:\n- custom-vue-code-dataset\nlanguage: ko\nlicense: mit\ntags:\n- vue\n- code-search\n- cross-encoder\n- korean\n- reranking\n- ktds"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', 
size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 567755777}, ""total"": 567755777}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-25 08:12:59+00:00"", ""cardData"": ""base_model: dragonkue/bge-reranker-v2-m3-ko\ndatasets:\n- custom-vue-code-dataset\nlanguage: ko\nlicense: mit\ntags:\n- vue\n- code-search\n- cross-encoder\n- korean\n- reranking\n- ktds"", ""transformersInfo"": null, ""_id"": ""680ae371b3e87738b886b3f7"", ""modelId"": ""SeoJHeasdw/ktds-vue-code-search-reranker-ko"", ""usedStorage"": 2288155017}",2,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=SeoJHeasdw/ktds-vue-code-search-reranker-ko&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSeoJHeasdw%2Fktds-vue-code-search-reranker-ko%5D(%2FSeoJHeasdw%2Fktds-vue-code-search-reranker-ko)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 611 |
+
wl-tookitaki/bge_reranker,"---
|
| 612 |
+
license: apache-2.0
|
| 613 |
+
base_model: BAAI/bge-reranker-v2-m3
|
| 614 |
+
tags:
|
| 615 |
+
- generated_from_trainer
|
| 616 |
+
model-index:
|
| 617 |
+
- name: bge_reranker
|
| 618 |
+
results: []
|
| 619 |
+
---
|
| 620 |
+
|
| 621 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 622 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 623 |
+
|
| 624 |
+
[<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""200"" height=""32""/>](https://wandb.ai/tookitaki/huggingface/runs/ed60vdsj)
|
| 625 |
+
# bge_reranker
|
| 626 |
+
|
| 627 |
+
This model is a fine-tuned version of [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) on an unknown dataset.
|
| 628 |
+
|
| 629 |
+
## Model description
|
| 630 |
+
|
| 631 |
+
More information needed
|
| 632 |
+
|
| 633 |
+
## Intended uses & limitations
|
| 634 |
+
|
| 635 |
+
More information needed
|
| 636 |
+
|
| 637 |
+
## Training and evaluation data
|
| 638 |
+
|
| 639 |
+
More information needed
|
| 640 |
+
|
| 641 |
+
## Training procedure
|
| 642 |
+
|
| 643 |
+
### Training hyperparameters
|
| 644 |
+
|
| 645 |
+
The following hyperparameters were used during training:
|
| 646 |
+
- learning_rate: 3e-05
|
| 647 |
+
- train_batch_size: 16
|
| 648 |
+
- eval_batch_size: 8
|
| 649 |
+
- seed: 42
|
| 650 |
+
- gradient_accumulation_steps: 2
|
| 651 |
+
- total_train_batch_size: 32
|
| 652 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 653 |
+
- lr_scheduler_type: cosine_with_restarts
|
| 654 |
+
- lr_scheduler_warmup_ratio: 0.1
|
| 655 |
+
- num_epochs: 4.0
|
| 656 |
+
- mixed_precision_training: Native AMP
|
| 657 |
+
|
| 658 |
+
### Training results
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
### Framework versions
|
| 663 |
+
|
| 664 |
+
- Transformers 4.42.4
|
| 665 |
+
- Pytorch 2.1.0+cu118
|
| 666 |
+
- Datasets 2.20.0
|
| 667 |
+
- Tokenizers 0.19.1
|
| 668 |
+
","{""id"": ""wl-tookitaki/bge_reranker"", ""author"": ""wl-tookitaki"", ""sha"": ""12353c182b7d0c410ddc2f59492cadda2b635c55"", ""last_modified"": ""2024-07-23 11:42:12+00:00"", ""created_at"": ""2024-07-23 02:39:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""xlm-roberta"", ""text-classification"", ""generated_from_trainer"", ""base_model:BAAI/bge-reranker-v2-m3"", ""base_model:finetune:BAAI/bge-reranker-v2-m3"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: BAAI/bge-reranker-v2-m3\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: bge_reranker\n results: []"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": [{""name"": ""bge_reranker"", ""results"": []}], ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentencepiece.bpe.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 567755777}, ""total"": 567755777}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-23 11:42:12+00:00"", ""cardData"": ""base_model: BAAI/bge-reranker-v2-m3\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: bge_reranker\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""669f17fc8c65c172c4cad017"", ""modelId"": ""wl-tookitaki/bge_reranker"", ""usedStorage"": 
72679394515}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=wl-tookitaki/bge_reranker&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwl-tookitaki%2Fbge_reranker%5D(%2Fwl-tookitaki%2Fbge_reranker)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 669 |
+
qilowoq/bge-reranker-v2-m3-en-ru,"---
|
| 670 |
+
base_model: BAAI/bge-reranker-v2-m3
|
| 671 |
+
language:
|
| 672 |
+
- en
|
| 673 |
+
- ru
|
| 674 |
+
license: mit
|
| 675 |
+
pipeline_tag: text-ranking
|
| 676 |
+
tags:
|
| 677 |
+
- transformers
|
| 678 |
+
- sentence-transformers
|
| 679 |
+
- text-embeddings-inference
|
| 680 |
+
library_name: sentence-transformers
|
| 681 |
+
---
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
# Model for English and Russian
|
| 685 |
+
|
| 686 |
+
This is a truncated version of [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3).
|
| 687 |
+
|
| 688 |
+
This model has only English and Russian tokens left in the vocabulary. Thus making it 1.5 smaller than the original model while producing the same embeddings.
|
| 689 |
+
|
| 690 |
+
The model has been truncated in [this notebook](https://colab.research.google.com/drive/19IFjWpJpxQie1gtHSvDeoKk7CQtpy6bT?usp=sharing).
|
| 691 |
+
|
| 692 |
+
## FAQ
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
### Generate Scores for text
|
| 696 |
+
|
| 697 |
+
```python
|
| 698 |
+
import torch
|
| 699 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 700 |
+
|
| 701 |
+
tokenizer = AutoTokenizer.from_pretrained('qilowoq/bge-reranker-v2-m3-en-ru')
|
| 702 |
+
model = AutoModelForSequenceClassification.from_pretrained('qilowoq/bge-reranker-v2-m3-en-ru')
|
| 703 |
+
model.eval()
|
| 704 |
+
|
| 705 |
+
pairs = [('How many people live in Berlin?', 'Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.'),
|
| 706 |
+
('Какая площадь Берлина?', 'Площадь Берлина составляет 891,8 квадратных километров.')]
|
| 707 |
+
with torch.no_grad():
|
| 708 |
+
inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt')
|
| 709 |
+
scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
|
| 710 |
+
print(scores)
|
| 711 |
+
```
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
## Citation
|
| 715 |
+
|
| 716 |
+
If you find this repository useful, please consider giving a star and citation
|
| 717 |
+
|
| 718 |
+
```bibtex
|
| 719 |
+
@misc{li2023making,
|
| 720 |
+
title={Making Large Language Models A Better Foundation For Dense Retrieval},
|
| 721 |
+
author={Chaofan Li and Zheng Liu and Shitao Xiao and Yingxia Shao},
|
| 722 |
+
year={2023},
|
| 723 |
+
eprint={2312.15503},
|
| 724 |
+
archivePrefix={arXiv},
|
| 725 |
+
primaryClass={cs.CL}
|
| 726 |
+
}
|
| 727 |
+
@misc{chen2024bge,
|
| 728 |
+
title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
|
| 729 |
+
author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
|
| 730 |
+
year={2024},
|
| 731 |
+
eprint={2402.03216},
|
| 732 |
+
archivePrefix={arXiv},
|
| 733 |
+
primaryClass={cs.CL}
|
| 734 |
+
}
|
| 735 |
+
```
|
| 736 |
+
```","{""id"": ""qilowoq/bge-reranker-v2-m3-en-ru"", ""author"": ""qilowoq"", ""sha"": ""0ca0a6a717ffbe01e25c6e8e33bad7982d938930"", ""last_modified"": ""2025-04-05 01:47:46+00:00"", ""created_at"": ""2024-09-05 01:25:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 698, ""downloads_all_time"": null, ""likes"": 5, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""safetensors"", ""xlm-roberta"", ""text-classification"", ""transformers"", ""text-embeddings-inference"", ""text-ranking"", ""en"", ""ru"", ""arxiv:2312.15503"", ""arxiv:2402.03216"", ""base_model:BAAI/bge-reranker-v2-m3"", ""base_model:finetune:BAAI/bge-reranker-v2-m3"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""text-ranking"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- en\n- ru\nlibrary_name: sentence-transformers\nlicense: mit\npipeline_tag: text-ranking\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 374930433}, ""total"": 374930433}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-05 01:47:46+00:00"", ""cardData"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- en\n- ru\nlibrary_name: sentence-transformers\nlicense: mit\npipeline_tag: text-ranking\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66d9088f721288646ee0a566"", ""modelId"": ""qilowoq/bge-reranker-v2-m3-en-ru"", ""usedStorage"": 2994035008}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=qilowoq/bge-reranker-v2-m3-en-ru&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bqilowoq%2Fbge-reranker-v2-m3-en-ru%5D(%2Fqilowoq%2Fbge-reranker-v2-m3-en-ru)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 737 |
+
sridhariyer/bge-reranker-v2-m3-openvino,"---
|
| 738 |
+
base_model: BAAI/bge-reranker-v2-m3
|
| 739 |
+
language:
|
| 740 |
+
- multilingual
|
| 741 |
+
license: apache-2.0
|
| 742 |
+
pipeline_tag: text-classification
|
| 743 |
+
tags:
|
| 744 |
+
- transformers
|
| 745 |
+
- sentence-transformers
|
| 746 |
+
- text-embeddings-inference
|
| 747 |
+
- openvino
|
| 748 |
+
- openvino-export
|
| 749 |
+
---
|
| 750 |
+
|
| 751 |
+
This model was converted to OpenVINO from [`BAAI/bge-reranker-v2-m3`](https://huggingface.co/BAAI/bge-reranker-v2-m3) using [optimum-intel](https://github.com/huggingface/optimum-intel)
|
| 752 |
+
via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space.
|
| 753 |
+
|
| 754 |
+
First make sure you have optimum-intel installed:
|
| 755 |
+
|
| 756 |
+
```bash
|
| 757 |
+
pip install optimum[openvino]
|
| 758 |
+
```
|
| 759 |
+
|
| 760 |
+
To load your model you can do as follows:
|
| 761 |
+
|
| 762 |
+
```python
|
| 763 |
+
from optimum.intel import OVModelForSequenceClassification
|
| 764 |
+
|
| 765 |
+
model_id = ""sridhariyer/bge-reranker-v2-m3-openvino""
|
| 766 |
+
model = OVModelForSequenceClassification.from_pretrained(model_id)
|
| 767 |
+
```
|
| 768 |
+
","{""id"": ""sridhariyer/bge-reranker-v2-m3-openvino"", ""author"": ""sridhariyer"", ""sha"": ""3dee0ff0b6ae1420e5653ea2df88e1158158c51b"", ""last_modified"": ""2024-10-14 19:32:52+00:00"", ""created_at"": ""2024-10-14 19:32:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""openvino"", ""xlm-roberta"", ""text-classification"", ""transformers"", ""text-embeddings-inference"", ""openvino-export"", ""multilingual"", ""base_model:BAAI/bge-reranker-v2-m3"", ""base_model:finetune:BAAI/bge-reranker-v2-m3"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference\n- openvino\n- openvino-export"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-14 19:32:52+00:00"", ""cardData"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference\n- openvino\n- openvino-export"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""670d71cbcad89cdabeffe073"", ""modelId"": ""sridhariyer/bge-reranker-v2-m3-openvino"", ""usedStorage"": 2288187065}",1,,0,,0,,0,,0,"echarlaix/openvino-export, huggingface/InferenceSupport/discussions/new?title=sridhariyer/bge-reranker-v2-m3-openvino&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsridhariyer%2Fbge-reranker-v2-m3-openvino%5D(%2Fsridhariyer%2Fbge-reranker-v2-m3-openvino)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
| 769 |
+
NumberEight/bge-reranker-v2-m3-openvino,"---
|
| 770 |
+
base_model: BAAI/bge-reranker-v2-m3
|
| 771 |
+
language:
|
| 772 |
+
- multilingual
|
| 773 |
+
license: apache-2.0
|
| 774 |
+
pipeline_tag: text-classification
|
| 775 |
+
tags:
|
| 776 |
+
- transformers
|
| 777 |
+
- sentence-transformers
|
| 778 |
+
- text-embeddings-inference
|
| 779 |
+
- openvino
|
| 780 |
+
- openvino-export
|
| 781 |
+
---
|
| 782 |
+
|
| 783 |
+
This model was converted to OpenVINO from [`BAAI/bge-reranker-v2-m3`](https://huggingface.co/BAAI/bge-reranker-v2-m3) using [optimum-intel](https://github.com/huggingface/optimum-intel)
|
| 784 |
+
via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space.
|
| 785 |
+
|
| 786 |
+
First make sure you have optimum-intel installed:
|
| 787 |
+
|
| 788 |
+
```bash
|
| 789 |
+
pip install optimum[openvino]
|
| 790 |
+
```
|
| 791 |
+
|
| 792 |
+
To load your model you can do as follows:
|
| 793 |
+
|
| 794 |
+
```python
|
| 795 |
+
from optimum.intel import OVModelForSequenceClassification
|
| 796 |
+
|
| 797 |
+
model_id = ""NumberEight/bge-reranker-v2-m3-openvino""
|
| 798 |
+
model = OVModelForSequenceClassification.from_pretrained(model_id)
|
| 799 |
+
```
|
| 800 |
+
","{""id"": ""NumberEight/bge-reranker-v2-m3-openvino"", ""author"": ""NumberEight"", ""sha"": ""ed96d80fcbf3b8f6f7d67d8bbf3bc5ac94ff9ce7"", ""last_modified"": ""2024-10-20 09:54:25+00:00"", ""created_at"": ""2024-10-20 09:54:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""openvino"", ""xlm-roberta"", ""text-classification"", ""transformers"", ""text-embeddings-inference"", ""openvino-export"", ""multilingual"", ""base_model:BAAI/bge-reranker-v2-m3"", ""base_model:finetune:BAAI/bge-reranker-v2-m3"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference\n- openvino\n- openvino-export"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""XLMRobertaForSequenceClassification""], ""model_type"": ""xlm-roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-20 09:54:25+00:00"", ""cardData"": ""base_model: BAAI/bge-reranker-v2-m3\nlanguage:\n- multilingual\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference\n- openvino\n- openvino-export"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6714d340d2fe0458c8197b0d"", ""modelId"": ""NumberEight/bge-reranker-v2-m3-openvino"", ""usedStorage"": 2288187065}",1,,0,,0,,0,,0,"echarlaix/openvino-export, huggingface/InferenceSupport/discussions/new?title=NumberEight/bge-reranker-v2-m3-openvino&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNumberEight%2Fbge-reranker-v2-m3-openvino%5D(%2FNumberEight%2Fbge-reranker-v2-m3-openvino)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
biomedical-ner-all_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
d4data/biomedical-ner-all,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- Token Classification
|
| 8 |
+
co2_eq_emissions: 0.0279399890043426
|
| 9 |
+
widget:
|
| 10 |
+
- text: ""CASE: A 28-year-old previously healthy man presented with a 6-week history of palpitations.
|
| 11 |
+
The symptoms occurred during rest, 2–3 times per week, lasted up to 30 minutes at a time and were associated with dyspnea.
|
| 12 |
+
Except for a grade 2/6 holosystolic tricuspid regurgitation murmur (best heard at the left sternal border with inspiratory accentuation), physical examination yielded unremarkable findings.""
|
| 13 |
+
example_title: ""example 1""
|
| 14 |
+
- text: ""A 63-year-old woman with no known cardiac history presented with a sudden onset of dyspnea requiring intubation and ventilatory support out of hospital.
|
| 15 |
+
She denied preceding symptoms of chest discomfort, palpitations, syncope or infection.
|
| 16 |
+
The patient was afebrile and normotensive, with a sinus tachycardia of 140 beats/min.""
|
| 17 |
+
example_title: ""example 2""
|
| 18 |
+
- text: ""A 48 year-old female presented with vaginal bleeding and abnormal Pap smears.
|
| 19 |
+
Upon diagnosis of invasive non-keratinizing SCC of the cervix, she underwent a radical hysterectomy with salpingo-oophorectomy which demonstrated positive spread to the pelvic lymph nodes and the parametrium.
|
| 20 |
+
Pathological examination revealed that the tumour also extensively involved the lower uterine segment.""
|
| 21 |
+
example_title: ""example 3""
|
| 22 |
+
---
|
| 23 |
+
|
| 24 |
+
## About the Model
|
| 25 |
+
An English Named Entity Recognition model, trained on Maccrobat to recognize the bio-medical entities (107 entities) from a given text corpus (case reports etc.). This model was built on top of distilbert-base-uncased
|
| 26 |
+
|
| 27 |
+
- Dataset: Maccrobat https://figshare.com/articles/dataset/MACCROBAT2018/9764942
|
| 28 |
+
- Carbon emission: 0.0279399890043426 Kg
|
| 29 |
+
- Training time: 30.16527 minutes
|
| 30 |
+
- GPU used : 1 x GeForce RTX 3060 Laptop GPU
|
| 31 |
+
|
| 32 |
+
Checkout the tutorial video for explanation of this model and corresponding python library: https://youtu.be/xpiDPdBpS18
|
| 33 |
+
|
| 34 |
+
## Usage
|
| 35 |
+
The easiest way is to load the inference api from huggingface and second method is through the pipeline object offered by transformers library.
|
| 36 |
+
```python
|
| 37 |
+
from transformers import pipeline
|
| 38 |
+
from transformers import AutoTokenizer, AutoModelForTokenClassification
|
| 39 |
+
|
| 40 |
+
tokenizer = AutoTokenizer.from_pretrained(""d4data/biomedical-ner-all"")
|
| 41 |
+
model = AutoModelForTokenClassification.from_pretrained(""d4data/biomedical-ner-all"")
|
| 42 |
+
|
| 43 |
+
pipe = pipeline(""ner"", model=model, tokenizer=tokenizer, aggregation_strategy=""simple"") # pass device=0 if using gpu
|
| 44 |
+
pipe(""""""The patient reported no recurrence of palpitations at follow-up 6 months after the ablation."""""")
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
## Author
|
| 48 |
+
This model is part of the Research topic ""AI in Biomedical field"" conducted by Deepak John Reji, Shaina Raza. If you use this work (code, model or dataset), please star at:
|
| 49 |
+
> https://github.com/dreji18/Bio-Epidemiology-NER
|
| 50 |
+
|
| 51 |
+
## You can support me here :)
|
| 52 |
+
<a href=""https://www.buymeacoffee.com/deepakjohnreji"" target=""_blank""><img src=""https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png"" alt=""Buy Me A Coffee"" style=""height: 60px !important;width: 217px !important;"" ></a>","{""id"": ""d4data/biomedical-ner-all"", ""author"": ""d4data"", ""sha"": ""015a4050c9ac99722e61c547aa9b4282bcbedc7f"", ""last_modified"": ""2023-07-02 07:28:28+00:00"", ""created_at"": ""2022-06-19 14:04:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 110942, ""downloads_all_time"": null, ""likes"": 164, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""distilbert"", ""token-classification"", ""Token Classification"", ""en"", ""license:apache-2.0"", ""co2_eq_emissions"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: apache-2.0\ntags:\n- Token Classification\nco2_eq_emissions: 0.0279399890043426\nwidget:\n- text: 'CASE: A 28-year-old previously healthy man presented with a 6-week history\n of palpitations. The symptoms occurred during rest, 2\u20133 times per week, lasted\n up to 30 minutes at a time and were associated with dyspnea. 
Except for a grade\n 2/6 holosystolic tricuspid regurgitation murmur (best heard at the left sternal\n border with inspiratory accentuation), physical examination yielded unremarkable\n findings.'\n example_title: example 1\n- text: A 63-year-old woman with no known cardiac history presented with a sudden\n onset of dyspnea requiring intubation and ventilatory support out of hospital.\n She denied preceding symptoms of chest discomfort, palpitations, syncope or infection.\n The patient was afebrile and normotensive, with a sinus tachycardia of 140 beats/min.\n example_title: example 2\n- text: A 48 year-old female presented with vaginal bleeding and abnormal Pap smears.\n Upon diagnosis of invasive non-keratinizing SCC of the cervix, she underwent a\n radical hysterectomy with salpingo-oophorectomy which demonstrated positive spread\n to the pelvic lymph nodes and the parametrium. Pathological examination revealed\n that the tumour also extensively involved the lower uterine segment.\n example_title: example 3"", ""widget_data"": [{""text"": ""CASE: A 28-year-old previously healthy man presented with a 6-week history of palpitations. The symptoms occurred during rest, 2\u20133 times per week, lasted up to 30 minutes at a time and were associated with dyspnea. Except for a grade 2/6 holosystolic tricuspid regurgitation murmur (best heard at the left sternal border with inspiratory accentuation), physical examination yielded unremarkable findings."", ""example_title"": ""example 1""}, {""text"": ""A 63-year-old woman with no known cardiac history presented with a sudden onset of dyspnea requiring intubation and ventilatory support out of hospital. She denied preceding symptoms of chest discomfort, palpitations, syncope or infection. The patient was afebrile and normotensive, with a sinus tachycardia of 140 beats/min."", ""example_title"": ""example 2""}, {""text"": ""A 48 year-old female presented with vaginal bleeding and abnormal Pap smears. 
Upon diagnosis of invasive non-keratinizing SCC of the cervix, she underwent a radical hysterectomy with salpingo-oophorectomy which demonstrated positive spread to the pelvic lymph nodes and the parametrium. Pathological examination revealed that the tumour also extensively involved the lower uterine segment."", ""example_title"": ""example 3""}], ""model_index"": null, ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""awacke1/Clinical-Terminology-Search-NER-Datasets"", ""DataScienceEngineering/7-NER-Biomed-ClinicalTerms"", ""awacke1/Ontology-Gradio"", ""keshva/Intelligent-Document-Processing"", ""andrewgleave/note-ner-demo"", ""awacke1/Biomed-NER-SNOMED-LOINC-CQM"", ""ceckenrode/Biomed-NLP-AI-Clinical-Terminology"", ""keneonyeachonam/Biomed-NER-AI-NLP-CT-Demo1"", ""AI-ZTH-03-23/8.Datasets-NER-Biomed-ClinicalTerms"", 
""joaopimenta/tackling-hospital-readmissions-ai"", ""Sasidhar/information-extraction-demo"", ""AIZero2Hero4Health/2-BiomedEntityRecognition-GR"", ""apratap5/Abhay-2-BiomedEntityRecognition-GR"", ""AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio"", ""RamAnanth1/whisper_biomed_ner"", ""UVA-MSBA/ADR_Detector"", ""keneonyeachonam/NPR_AI_NER_020623"", ""awacke1/Biomed-NER-AI-NLP-CT-Demo1"", ""ceckenrode/Biomed-NER-AI-NLP-CT-Demo1"", ""keneonyeachonam/d4data-biomedical-ner-all-020323"", ""ceckenrode/d4data-biomedical-ner-all232023"", ""furqankassa/d4data-biomedical-ner-all02032023"", ""awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary"", ""awacke1/d4data-biomedical-ner-all-0302"", ""GeekTony/Gradio-Ontology"", ""awacke1/ClinicalTerminologyNER-Refactored"", ""JohnC26/7-NER-Biomed-ClinicalTerms"", ""UVA-MSBA/vitalityv2"", ""calerio/SVM-ADR-Severity-Classifier-2"", ""calerio/SVM-ADR-Severity-Classifier-1"", ""mohamedali/MedNer"", ""hschlotter/BiomedEntityRecog"", ""pzimmel/2-BiomedEntityRecognition-GR"", ""sunilpuri/S-BimedEntityRecognition-GR"", ""jamesjohnson763/BiomedEntityRecognition-GR"", ""Sampathraju/2-BiomedEntityRecognition-GR"", ""Robo2000/BiomedEntityRecognition-GR"", ""vslasor/VLS2-BiomedEntityRecognition-GR"", ""tkottke/C2-BiomedEntityRecon"", ""versaggi/BiomedEntityRecognition-GR"", ""awacke1/Z2-BiomedEntityRecognition-GR"", ""ashishgargcse/BiomedEntityRecognition-GR"", ""jamessteele/BiomedEntityRecognition-GR"", ""Ami06/2-BiomedEntityRecognition-GR"", ""rrichaz/2-BiomedEntityRecognition-GR"", ""madireddi/MA-2-BiomedEntityRecognition-GR"", ""matt1873/Z2-BiomedEntityRecognition-GR"", ""Hitha/HJBiomedentityrecognition"", ""John4064/BiomedEntityRecognition-GR"", ""jamesjohnson763/1110-BiomedNER"", ""awacke1/1110-BiomedNER"", ""Ami06/1110-BiomedNER"", ""apratap5/1110-BiomedNer"", ""Robo2000/BiomedEntity"", ""Robo2000/03-BioMed_NER"", ""jharms10/03-BiomedNER-1117-Gradio"", ""alecmueller/03-BiomedEntityRecognition-GR"", ""Robo2000/01-Text-toSpeech"", 
""dabram2/3biomed"", ""tritter2/03-BiomedEntityRecognition-GR"", ""goetzjj/03_BiomedEntryRecognition_class"", ""jhescheles/03_gradio_biomed_named_entity"", ""AIZerotoHero-Health4All/01-Biomed-NER"", ""paragon-analytics/biomedical-ner"", ""sanjayw/d4data-biomedical-ner-all"", ""awacke1/d4data-biomedical-ner-all"", ""jharrison27/d4data-biomedical-ner-all-test"", ""awacke1/d4data-biomedical-ner-all-02032023"", ""VaAishvarR/Mod4Team5"", ""ser4ff/DAVA"", ""lchavan1/Ontology-Gradio"", ""asistaoptum/ontoogy-example"", ""richardyoung/Ontology-Gradio"", ""richardyoung/d4data-biomedical-ner-all"", ""bqr5tf/adrtest"", ""trevrock/adr_test"", ""keneonyeachonam/Biomed-NLP-AI-Clinical-Terminology-022323"", ""SteveMama/Bio-med-NER"", ""Anar0140/8.Datasets-NER-Biomed-ClinicalTerms"", ""kirchdub/8.Datasets-NER-Biomed-ClinicalTerms"", ""mpinzon/8.Datasets-NER-Biomed-ClinicalTerms"", ""platzi/spanish-entities"", ""UVA-MSBA/Mod4_Team1"", ""UVA-MSBA/m4_team7"", ""MarkAdamsMSBA24/ADRv2024"", ""UVA-MSBA/Mod4_Team5"", ""UVA-MSBA/Mod4Team3_ADR_Detector"", ""UVA-MSBA/macm"", ""UVA-MSBA/MOD4Team6MSBA2024"", ""MSBA24-Team8/ADRv2024"", ""frankysagan/Medical_NER"", ""ljyflores/casemaker_demo"", ""dgrant6/Clinical-Demo"", ""TachyHealth/DataScienceEngineering_7-NER-Biomed-ClinicalTerms_tool"", ""peachfawn/trial_matching_system"", ""Kobunaga/mistralai-Mistral-7B-Instruct-v0.3"", ""Kobunaga/mistralai-Mistral-7Btest"", ""orangeorang/mistralai-Mistral-7B-Instruct-v0.3"", ""flavaflav/medical_chatbot"", ""Akanksha17/BioBert""], ""safetensors"": {""parameters"": {""F32"": 66427476}, ""total"": 66427476}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-07-02 07:28:28+00:00"", ""cardData"": ""language:\n- en\nlicense: apache-2.0\ntags:\n- Token Classification\nco2_eq_emissions: 0.0279399890043426\nwidget:\n- text: 'CASE: A 28-year-old previously healthy man presented with a 6-week history\n of palpitations. 
The symptoms occurred during rest, 2\u20133 times per week, lasted\n up to 30 minutes at a time and were associated with dyspnea. Except for a grade\n 2/6 holosystolic tricuspid regurgitation murmur (best heard at the left sternal\n border with inspiratory accentuation), physical examination yielded unremarkable\n findings.'\n example_title: example 1\n- text: A 63-year-old woman with no known cardiac history presented with a sudden\n onset of dyspnea requiring intubation and ventilatory support out of hospital.\n She denied preceding symptoms of chest discomfort, palpitations, syncope or infection.\n The patient was afebrile and normotensive, with a sinus tachycardia of 140 beats/min.\n example_title: example 2\n- text: A 48 year-old female presented with vaginal bleeding and abnormal Pap smears.\n Upon diagnosis of invasive non-keratinizing SCC of the cervix, she underwent a\n radical hysterectomy with salpingo-oophorectomy which demonstrated positive spread\n to the pelvic lymph nodes and the parametrium. 
Pathological examination revealed\n that the tumour also extensively involved the lower uterine segment.\n example_title: example 3"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""62af2ce2a5bd7cef3efe4d2a"", ""modelId"": ""d4data/biomedical-ner-all"", ""usedStorage"": 1063094323}",0,"https://huggingface.co/roupenminassian/medicalBERT, https://huggingface.co/thainq107/ner-biomedical-maccrobat2018, https://huggingface.co/phuoc2k9/ner-biomedical-maccrobat2018, https://huggingface.co/Khoivudang1209/ner-biomedical-maccrobat2018, https://huggingface.co/htkien95/ner-biomedical-maccrobat2018",5,,0,,0,,0,"DataScienceEngineering/7-NER-Biomed-ClinicalTerms, TachyHealth/DataScienceEngineering_7-NER-Biomed-ClinicalTerms_tool, UVA-MSBA/ADR_Detector, andrewgleave/note-ner-demo, awacke1/Biomed-NER-SNOMED-LOINC-CQM, awacke1/Clinical-Terminology-Search-NER-Datasets, awacke1/Ontology-Gradio, calerio/SVM-ADR-Severity-Classifier-1, calerio/SVM-ADR-Severity-Classifier-2, dgrant6/Clinical-Demo, huggingface/InferenceSupport/discussions/new?title=d4data/biomedical-ner-all&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bd4data%2Fbiomedical-ner-all%5D(%2Fd4data%2Fbiomedical-ner-all)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, keshva/Intelligent-Document-Processing, orangeorang/mistralai-Mistral-7B-Instruct-v0.3",13
|
| 53 |
+
roupenminassian/medicalBERT,"---
|
| 54 |
+
license: apache-2.0
|
| 55 |
+
base_model: d4data/biomedical-ner-all
|
| 56 |
+
tags:
|
| 57 |
+
- generated_from_trainer
|
| 58 |
+
model-index:
|
| 59 |
+
- name: medicalBERT
|
| 60 |
+
results: []
|
| 61 |
+
---
|
| 62 |
+
|
| 63 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 64 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 65 |
+
|
| 66 |
+
# medicalBERT
|
| 67 |
+
|
| 68 |
+
This model is a fine-tuned version of [d4data/biomedical-ner-all](https://huggingface.co/d4data/biomedical-ner-all) on the None dataset.
|
| 69 |
+
|
| 70 |
+
## Model description
|
| 71 |
+
|
| 72 |
+
More information needed
|
| 73 |
+
|
| 74 |
+
## Intended uses & limitations
|
| 75 |
+
|
| 76 |
+
More information needed
|
| 77 |
+
|
| 78 |
+
## Training and evaluation data
|
| 79 |
+
|
| 80 |
+
More information needed
|
| 81 |
+
|
| 82 |
+
## Training procedure
|
| 83 |
+
|
| 84 |
+
### Training hyperparameters
|
| 85 |
+
|
| 86 |
+
The following hyperparameters were used during training:
|
| 87 |
+
- learning_rate: 5e-05
|
| 88 |
+
- train_batch_size: 1
|
| 89 |
+
- eval_batch_size: 8
|
| 90 |
+
- seed: 42
|
| 91 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 92 |
+
- lr_scheduler_type: linear
|
| 93 |
+
- num_epochs: 30
|
| 94 |
+
|
| 95 |
+
### Training results
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
### Framework versions
|
| 100 |
+
|
| 101 |
+
- Transformers 4.35.2
|
| 102 |
+
- Pytorch 2.1.0+cu121
|
| 103 |
+
- Datasets 2.16.1
|
| 104 |
+
- Tokenizers 0.15.1
|
| 105 |
+
","{""id"": ""roupenminassian/medicalBERT"", ""author"": ""roupenminassian"", ""sha"": ""ae3c37ad7440b6d0b1f0a1d5c4a33f032ae116e7"", ""last_modified"": ""2024-02-07 08:30:56+00:00"", ""created_at"": ""2024-02-07 08:25:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""distilbert"", ""token-classification"", ""generated_from_trainer"", ""base_model:d4data/biomedical-ner-all"", ""base_model:finetune:d4data/biomedical-ner-all"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: d4data/biomedical-ner-all\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: medicalBERT\n results: []"", ""widget_data"": [{""text"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""My name is Sarah and I live in London""}, {""text"": ""My name is Clara and I live in Berkeley, California.""}], ""model_index"": [{""name"": ""medicalBERT"", ""results"": []}], ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb07_07-54-51_3eaf2d916f9a/events.out.tfevents.1707292493.3eaf2d916f9a.4746.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb07_07-59-26_3eaf2d916f9a/events.out.tfevents.1707292768.3eaf2d916f9a.4746.4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb07_08-12-53_3eaf2d916f9a/events.out.tfevents.1707293574.3eaf2d916f9a.4746.5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb07_08-16-54_3eaf2d916f9a/events.out.tfevents.1707293815.3eaf2d916f9a.4746.6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb07_08-25-53_3eaf2d916f9a/events.out.tfevents.1707294354.3eaf2d916f9a.4746.7', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 66368263}, ""total"": 66368263}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-07 08:30:56+00:00"", ""cardData"": ""base_model: d4data/biomedical-ner-all\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: medicalBERT\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65c33e91d14c096f8a7fdd3c"", ""modelId"": ""roupenminassian/medicalBERT"", ""usedStorage"": 
265513780}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=roupenminassian/medicalBERT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Broupenminassian%2FmedicalBERT%5D(%2Froupenminassian%2FmedicalBERT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 106 |
+
thainq107/ner-biomedical-maccrobat2018,"---
|
| 107 |
+
library_name: transformers
|
| 108 |
+
license: apache-2.0
|
| 109 |
+
base_model: d4data/biomedical-ner-all
|
| 110 |
+
tags:
|
| 111 |
+
- generated_from_trainer
|
| 112 |
+
metrics:
|
| 113 |
+
- accuracy
|
| 114 |
+
model-index:
|
| 115 |
+
- name: ner-biomedical-maccrobat2018
|
| 116 |
+
results: []
|
| 117 |
+
---
|
| 118 |
+
|
| 119 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 120 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 121 |
+
|
| 122 |
+
# ner-biomedical-maccrobat2018
|
| 123 |
+
|
| 124 |
+
This model is a fine-tuned version of [d4data/biomedical-ner-all](https://huggingface.co/d4data/biomedical-ner-all) on an unknown dataset.
|
| 125 |
+
It achieves the following results on the evaluation set:
|
| 126 |
+
- Loss: 0.6342
|
| 127 |
+
- Accuracy: 0.7903
|
| 128 |
+
|
| 129 |
+
## Model description
|
| 130 |
+
|
| 131 |
+
More information needed
|
| 132 |
+
|
| 133 |
+
## Intended uses & limitations
|
| 134 |
+
|
| 135 |
+
More information needed
|
| 136 |
+
|
| 137 |
+
## Training and evaluation data
|
| 138 |
+
|
| 139 |
+
More information needed
|
| 140 |
+
|
| 141 |
+
## Training procedure
|
| 142 |
+
|
| 143 |
+
### Training hyperparameters
|
| 144 |
+
|
| 145 |
+
The following hyperparameters were used during training:
|
| 146 |
+
- learning_rate: 0.0001
|
| 147 |
+
- train_batch_size: 16
|
| 148 |
+
- eval_batch_size: 16
|
| 149 |
+
- seed: 42
|
| 150 |
+
- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
| 151 |
+
- lr_scheduler_type: linear
|
| 152 |
+
- num_epochs: 20
|
| 153 |
+
|
| 154 |
+
### Training results
|
| 155 |
+
|
| 156 |
+
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|
| 157 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------:|
|
| 158 |
+
| 2.5437 | 1.0 | 10 | 1.6009 | 0.3671 |
|
| 159 |
+
| 1.3262 | 2.0 | 20 | 0.9660 | 0.6254 |
|
| 160 |
+
| 0.8675 | 3.0 | 30 | 0.7436 | 0.7145 |
|
| 161 |
+
| 0.6199 | 4.0 | 40 | 0.6544 | 0.7385 |
|
| 162 |
+
| 0.4707 | 5.0 | 50 | 0.6131 | 0.7660 |
|
| 163 |
+
| 0.3735 | 6.0 | 60 | 0.6027 | 0.7709 |
|
| 164 |
+
| 0.3049 | 7.0 | 70 | 0.6056 | 0.7786 |
|
| 165 |
+
| 0.2507 | 8.0 | 80 | 0.5992 | 0.7792 |
|
| 166 |
+
| 0.2144 | 9.0 | 90 | 0.6115 | 0.7780 |
|
| 167 |
+
| 0.1801 | 10.0 | 100 | 0.6062 | 0.7863 |
|
| 168 |
+
| 0.1539 | 11.0 | 110 | 0.6101 | 0.7854 |
|
| 169 |
+
| 0.1372 | 12.0 | 120 | 0.6157 | 0.7892 |
|
| 170 |
+
| 0.1234 | 13.0 | 130 | 0.6269 | 0.7896 |
|
| 171 |
+
| 0.1119 | 14.0 | 140 | 0.6285 | 0.7881 |
|
| 172 |
+
| 0.1025 | 15.0 | 150 | 0.6364 | 0.7879 |
|
| 173 |
+
| 0.0945 | 16.0 | 160 | 0.6326 | 0.7896 |
|
| 174 |
+
| 0.09 | 17.0 | 170 | 0.6297 | 0.7916 |
|
| 175 |
+
| 0.0861 | 18.0 | 180 | 0.6318 | 0.7908 |
|
| 176 |
+
| 0.083 | 19.0 | 190 | 0.6317 | 0.7901 |
|
| 177 |
+
| 0.0817 | 20.0 | 200 | 0.6342 | 0.7903 |
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
### Framework versions
|
| 181 |
+
|
| 182 |
+
- Transformers 4.47.1
|
| 183 |
+
- Pytorch 2.5.1+cu121
|
| 184 |
+
- Datasets 3.2.0
|
| 185 |
+
- Tokenizers 0.21.0
|
| 186 |
+
","{""id"": ""thainq107/ner-biomedical-maccrobat2018"", ""author"": ""thainq107"", ""sha"": ""b2557ef3bfc8bcbeadbd33b82edf5fb92422678d"", ""last_modified"": ""2025-02-03 02:06:06+00:00"", ""created_at"": ""2025-02-01 03:22:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 32, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""distilbert"", ""token-classification"", ""generated_from_trainer"", ""base_model:d4data/biomedical-ner-all"", ""base_model:finetune:d4data/biomedical-ner-all"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""widget_data"": [{""text"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""My name is Sarah and I live in London""}, {""text"": ""My name is Clara and I live in Berkeley, California.""}], ""model_index"": [{""name"": ""ner-biomedical-maccrobat2018"", ""results"": []}], ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb01_03-17-09_fa8cac01d5e9/events.out.tfevents.1738379831.fa8cac01d5e9.1529.5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb03_01-58-07_580211c92e0c/events.out.tfevents.1738547888.580211c92e0c.798.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb03_01-58-28_580211c92e0c/events.out.tfevents.1738547908.580211c92e0c.798.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 66426707}, ""total"": 66426707}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-03 02:06:06+00:00"", ""cardData"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""679d938e2c82de9b3fb2e06a"", ""modelId"": ""thainq107/ner-biomedical-maccrobat2018"", ""usedStorage"": 
531498594}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=thainq107/ner-biomedical-maccrobat2018&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bthainq107%2Fner-biomedical-maccrobat2018%5D(%2Fthainq107%2Fner-biomedical-maccrobat2018)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 187 |
+
phuoc2k9/ner-biomedical-maccrobat2018,"---
|
| 188 |
+
library_name: transformers
|
| 189 |
+
license: apache-2.0
|
| 190 |
+
base_model: d4data/biomedical-ner-all
|
| 191 |
+
tags:
|
| 192 |
+
- generated_from_trainer
|
| 193 |
+
metrics:
|
| 194 |
+
- accuracy
|
| 195 |
+
model-index:
|
| 196 |
+
- name: ner-biomedical-maccrobat2018
|
| 197 |
+
results: []
|
| 198 |
+
---
|
| 199 |
+
|
| 200 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 201 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 202 |
+
|
| 203 |
+
# ner-biomedical-maccrobat2018
|
| 204 |
+
|
| 205 |
+
This model is a fine-tuned version of [d4data/biomedical-ner-all](https://huggingface.co/d4data/biomedical-ner-all) on an unknown dataset.
|
| 206 |
+
It achieves the following results on the evaluation set:
|
| 207 |
+
- Loss: 0.6384
|
| 208 |
+
- Accuracy: 0.8007
|
| 209 |
+
|
| 210 |
+
## Model description
|
| 211 |
+
|
| 212 |
+
More information needed
|
| 213 |
+
|
| 214 |
+
## Intended uses & limitations
|
| 215 |
+
|
| 216 |
+
More information needed
|
| 217 |
+
|
| 218 |
+
## Training and evaluation data
|
| 219 |
+
|
| 220 |
+
More information needed
|
| 221 |
+
|
| 222 |
+
## Training procedure
|
| 223 |
+
|
| 224 |
+
### Training hyperparameters
|
| 225 |
+
|
| 226 |
+
The following hyperparameters were used during training:
|
| 227 |
+
- learning_rate: 0.0001
|
| 228 |
+
- train_batch_size: 16
|
| 229 |
+
- eval_batch_size: 16
|
| 230 |
+
- seed: 42
|
| 231 |
+
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
| 232 |
+
- lr_scheduler_type: linear
|
| 233 |
+
- num_epochs: 20
|
| 234 |
+
|
| 235 |
+
### Training results
|
| 236 |
+
|
| 237 |
+
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|
| 238 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------:|
|
| 239 |
+
| 2.7438 | 1.0 | 10 | 1.8410 | 0.2970 |
|
| 240 |
+
| 1.4729 | 2.0 | 20 | 1.0655 | 0.5852 |
|
| 241 |
+
| 0.9161 | 3.0 | 30 | 0.7775 | 0.7075 |
|
| 242 |
+
| 0.6509 | 4.0 | 40 | 0.6808 | 0.7477 |
|
| 243 |
+
| 0.4923 | 5.0 | 50 | 0.6315 | 0.7603 |
|
| 244 |
+
| 0.3818 | 6.0 | 60 | 0.6120 | 0.7756 |
|
| 245 |
+
| 0.3096 | 7.0 | 70 | 0.6025 | 0.7742 |
|
| 246 |
+
| 0.2546 | 8.0 | 80 | 0.5992 | 0.7861 |
|
| 247 |
+
| 0.2089 | 9.0 | 90 | 0.6075 | 0.7883 |
|
| 248 |
+
| 0.178 | 10.0 | 100 | 0.6149 | 0.7877 |
|
| 249 |
+
| 0.159 | 11.0 | 110 | 0.6219 | 0.8012 |
|
| 250 |
+
| 0.139 | 12.0 | 120 | 0.6282 | 0.7997 |
|
| 251 |
+
| 0.1239 | 13.0 | 130 | 0.6222 | 0.7970 |
|
| 252 |
+
| 0.1115 | 14.0 | 140 | 0.6311 | 0.7915 |
|
| 253 |
+
| 0.1015 | 15.0 | 150 | 0.6336 | 0.7976 |
|
| 254 |
+
| 0.0958 | 16.0 | 160 | 0.6321 | 0.7955 |
|
| 255 |
+
| 0.0898 | 17.0 | 170 | 0.6352 | 0.7990 |
|
| 256 |
+
| 0.0874 | 18.0 | 180 | 0.6464 | 0.7981 |
|
| 257 |
+
| 0.0841 | 19.0 | 190 | 0.6380 | 0.7992 |
|
| 258 |
+
| 0.0819 | 20.0 | 200 | 0.6384 | 0.8007 |
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
### Framework versions
|
| 262 |
+
|
| 263 |
+
- Transformers 4.48.2
|
| 264 |
+
- Pytorch 2.5.1+cu124
|
| 265 |
+
- Datasets 3.2.0
|
| 266 |
+
- Tokenizers 0.21.0
|
| 267 |
+
","{""id"": ""phuoc2k9/ner-biomedical-maccrobat2018"", ""author"": ""phuoc2k9"", ""sha"": ""89309515099c5f56a353754ec7ceb8cfbb7c61d3"", ""last_modified"": ""2025-02-11 07:09:14+00:00"", ""created_at"": ""2025-02-11 07:08:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""distilbert"", ""token-classification"", ""generated_from_trainer"", ""base_model:d4data/biomedical-ner-all"", ""base_model:finetune:d4data/biomedical-ner-all"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""widget_data"": [{""text"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""My name is Sarah and I live in London""}, {""text"": ""My name is Clara and I live in Berkeley, California.""}], ""model_index"": [{""name"": ""ner-biomedical-maccrobat2018"", ""results"": []}], ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-50-31_f26b9cc97ceb/events.out.tfevents.1739256632.f26b9cc97ceb.467.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-51-45_f26b9cc97ceb/events.out.tfevents.1739256706.f26b9cc97ceb.467.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-52-47_f26b9cc97ceb/events.out.tfevents.1739256767.f26b9cc97ceb.467.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-53-33_f26b9cc97ceb/events.out.tfevents.1739256813.f26b9cc97ceb.467.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-53-59_f26b9cc97ceb/events.out.tfevents.1739256839.f26b9cc97ceb.467.4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-54-57_f26b9cc97ceb/events.out.tfevents.1739256898.f26b9cc97ceb.467.5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb11_06-56-56_f26b9cc97ceb/events.out.tfevents.1739257017.f26b9cc97ceb.467.6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 66426707}, ""total"": 66426707}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-11 07:09:14+00:00"", ""cardData"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- 
name: ner-biomedical-maccrobat2018\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67aaf7846b842ad4f7dbe9e4"", ""modelId"": ""phuoc2k9/ner-biomedical-maccrobat2018"", ""usedStorage"": 265800260}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=phuoc2k9/ner-biomedical-maccrobat2018&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bphuoc2k9%2Fner-biomedical-maccrobat2018%5D(%2Fphuoc2k9%2Fner-biomedical-maccrobat2018)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 268 |
+
Khoivudang1209/ner-biomedical-maccrobat2018,"---
|
| 269 |
+
library_name: transformers
|
| 270 |
+
license: apache-2.0
|
| 271 |
+
base_model: d4data/biomedical-ner-all
|
| 272 |
+
tags:
|
| 273 |
+
- generated_from_trainer
|
| 274 |
+
metrics:
|
| 275 |
+
- accuracy
|
| 276 |
+
model-index:
|
| 277 |
+
- name: ner-biomedical-maccrobat2018
|
| 278 |
+
results: []
|
| 279 |
+
---
|
| 280 |
+
|
| 281 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 282 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 283 |
+
|
| 284 |
+
# ner-biomedical-maccrobat2018
|
| 285 |
+
|
| 286 |
+
This model is a fine-tuned version of [d4data/biomedical-ner-all](https://huggingface.co/d4data/biomedical-ner-all) on an unknown dataset.
|
| 287 |
+
It achieves the following results on the evaluation set:
|
| 288 |
+
- Loss: 0.6603
|
| 289 |
+
- Accuracy: 0.7785
|
| 290 |
+
|
| 291 |
+
## Model description
|
| 292 |
+
|
| 293 |
+
More information needed
|
| 294 |
+
|
| 295 |
+
## Intended uses & limitations
|
| 296 |
+
|
| 297 |
+
More information needed
|
| 298 |
+
|
| 299 |
+
## Training and evaluation data
|
| 300 |
+
|
| 301 |
+
More information needed
|
| 302 |
+
|
| 303 |
+
## Training procedure
|
| 304 |
+
|
| 305 |
+
### Training hyperparameters
|
| 306 |
+
|
| 307 |
+
The following hyperparameters were used during training:
|
| 308 |
+
- learning_rate: 0.0001
|
| 309 |
+
- train_batch_size: 16
|
| 310 |
+
- eval_batch_size: 16
|
| 311 |
+
- seed: 42
|
| 312 |
+
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
| 313 |
+
- lr_scheduler_type: linear
|
| 314 |
+
- num_epochs: 20
|
| 315 |
+
|
| 316 |
+
### Training results
|
| 317 |
+
|
| 318 |
+
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|
| 319 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------:|
|
| 320 |
+
| 2.6521 | 1.0 | 10 | 1.7474 | 0.2525 |
|
| 321 |
+
| 1.3889 | 2.0 | 20 | 1.0400 | 0.5568 |
|
| 322 |
+
| 0.8632 | 3.0 | 30 | 0.7819 | 0.6695 |
|
| 323 |
+
| 0.6142 | 4.0 | 40 | 0.6757 | 0.7196 |
|
| 324 |
+
| 0.4609 | 5.0 | 50 | 0.6340 | 0.7401 |
|
| 325 |
+
| 0.3611 | 6.0 | 60 | 0.6265 | 0.7547 |
|
| 326 |
+
| 0.2924 | 7.0 | 70 | 0.6077 | 0.7491 |
|
| 327 |
+
| 0.2378 | 8.0 | 80 | 0.6112 | 0.7546 |
|
| 328 |
+
| 0.1974 | 9.0 | 90 | 0.6043 | 0.7605 |
|
| 329 |
+
| 0.1725 | 10.0 | 100 | 0.6336 | 0.7565 |
|
| 330 |
+
| 0.1493 | 11.0 | 110 | 0.6230 | 0.7685 |
|
| 331 |
+
| 0.1311 | 12.0 | 120 | 0.6339 | 0.7693 |
|
| 332 |
+
| 0.1168 | 13.0 | 130 | 0.6295 | 0.7748 |
|
| 333 |
+
| 0.1061 | 14.0 | 140 | 0.6472 | 0.7700 |
|
| 334 |
+
| 0.0975 | 15.0 | 150 | 0.6553 | 0.7731 |
|
| 335 |
+
| 0.0914 | 16.0 | 160 | 0.6526 | 0.7773 |
|
| 336 |
+
| 0.0864 | 17.0 | 170 | 0.6558 | 0.7755 |
|
| 337 |
+
| 0.0823 | 18.0 | 180 | 0.6624 | 0.7740 |
|
| 338 |
+
| 0.0813 | 19.0 | 190 | 0.6627 | 0.7778 |
|
| 339 |
+
| 0.0786 | 20.0 | 200 | 0.6603 | 0.7785 |
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
### Framework versions
|
| 343 |
+
|
| 344 |
+
- Transformers 4.48.3
|
| 345 |
+
- Pytorch 2.5.1+cu124
|
| 346 |
+
- Datasets 3.3.2
|
| 347 |
+
- Tokenizers 0.21.0
|
| 348 |
+
","{""id"": ""Khoivudang1209/ner-biomedical-maccrobat2018"", ""author"": ""Khoivudang1209"", ""sha"": ""ef91449bc5297d9f43f1e095b612887848692144"", ""last_modified"": ""2025-03-02 02:00:00+00:00"", ""created_at"": ""2025-03-02 01:59:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""distilbert"", ""token-classification"", ""generated_from_trainer"", ""base_model:d4data/biomedical-ner-all"", ""base_model:finetune:d4data/biomedical-ner-all"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""widget_data"": [{""text"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""My name is Sarah and I live in London""}, {""text"": ""My name is Clara and I live in Berkeley, California.""}], ""model_index"": [{""name"": ""ner-biomedical-maccrobat2018"", ""results"": []}], ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar01_16-02-52_e39e0207c33d/events.out.tfevents.1740844973.e39e0207c33d.174.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar02_01-23-55_60d42fbee2e2/events.out.tfevents.1740878638.60d42fbee2e2.776.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 66426707}, ""total"": 66426707}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-02 02:00:00+00:00"", ""cardData"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67c3bb82b8b3182d52137764"", ""modelId"": ""Khoivudang1209/ner-biomedical-maccrobat2018"", ""usedStorage"": 
265760218}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Khoivudang1209/ner-biomedical-maccrobat2018&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKhoivudang1209%2Fner-biomedical-maccrobat2018%5D(%2FKhoivudang1209%2Fner-biomedical-maccrobat2018)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 349 |
+
htkien95/ner-biomedical-maccrobat2018,"---
|
| 350 |
+
library_name: transformers
|
| 351 |
+
license: apache-2.0
|
| 352 |
+
base_model: d4data/biomedical-ner-all
|
| 353 |
+
tags:
|
| 354 |
+
- generated_from_trainer
|
| 355 |
+
metrics:
|
| 356 |
+
- accuracy
|
| 357 |
+
model-index:
|
| 358 |
+
- name: ner-biomedical-maccrobat2018
|
| 359 |
+
results: []
|
| 360 |
+
---
|
| 361 |
+
|
| 362 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 363 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 364 |
+
|
| 365 |
+
# ner-biomedical-maccrobat2018
|
| 366 |
+
|
| 367 |
+
This model is a fine-tuned version of [d4data/biomedical-ner-all](https://huggingface.co/d4data/biomedical-ner-all) on an unknown dataset.
|
| 368 |
+
It achieves the following results on the evaluation set:
|
| 369 |
+
- Loss: 0.7250
|
| 370 |
+
- Accuracy: 0.7665
|
| 371 |
+
|
| 372 |
+
## Model description
|
| 373 |
+
|
| 374 |
+
More information needed
|
| 375 |
+
|
| 376 |
+
## Intended uses & limitations
|
| 377 |
+
|
| 378 |
+
More information needed
|
| 379 |
+
|
| 380 |
+
## Training and evaluation data
|
| 381 |
+
|
| 382 |
+
More information needed
|
| 383 |
+
|
| 384 |
+
## Training procedure
|
| 385 |
+
|
| 386 |
+
### Training hyperparameters
|
| 387 |
+
|
| 388 |
+
The following hyperparameters were used during training:
|
| 389 |
+
- learning_rate: 0.0001
|
| 390 |
+
- train_batch_size: 16
|
| 391 |
+
- eval_batch_size: 16
|
| 392 |
+
- seed: 42
|
| 393 |
+
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
| 394 |
+
- lr_scheduler_type: linear
|
| 395 |
+
- num_epochs: 20
|
| 396 |
+
|
| 397 |
+
### Training results
|
| 398 |
+
|
| 399 |
+
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|
| 400 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------:|
|
| 401 |
+
| 2.6575 | 1.0 | 10 | 1.7587 | 0.2575 |
|
| 402 |
+
| 1.4576 | 2.0 | 20 | 1.1109 | 0.5523 |
|
| 403 |
+
| 0.9363 | 3.0 | 30 | 0.8227 | 0.6823 |
|
| 404 |
+
| 0.6519 | 4.0 | 40 | 0.7157 | 0.7200 |
|
| 405 |
+
| 0.4871 | 5.0 | 50 | 0.6950 | 0.7298 |
|
| 406 |
+
| 0.3808 | 6.0 | 60 | 0.6697 | 0.7455 |
|
| 407 |
+
| 0.3106 | 7.0 | 70 | 0.6761 | 0.7481 |
|
| 408 |
+
| 0.2558 | 8.0 | 80 | 0.6697 | 0.7568 |
|
| 409 |
+
| 0.2151 | 9.0 | 90 | 0.6689 | 0.7546 |
|
| 410 |
+
| 0.1856 | 10.0 | 100 | 0.6743 | 0.7587 |
|
| 411 |
+
| 0.1617 | 11.0 | 110 | 0.7075 | 0.7569 |
|
| 412 |
+
| 0.1426 | 12.0 | 120 | 0.7087 | 0.7561 |
|
| 413 |
+
| 0.1282 | 13.0 | 130 | 0.6996 | 0.7624 |
|
| 414 |
+
| 0.1163 | 14.0 | 140 | 0.7200 | 0.7646 |
|
| 415 |
+
| 0.1068 | 15.0 | 150 | 0.7044 | 0.7673 |
|
| 416 |
+
| 0.0968 | 16.0 | 160 | 0.7150 | 0.7641 |
|
| 417 |
+
| 0.0934 | 17.0 | 170 | 0.7239 | 0.7673 |
|
| 418 |
+
| 0.0871 | 18.0 | 180 | 0.7264 | 0.7652 |
|
| 419 |
+
| 0.085 | 19.0 | 190 | 0.7269 | 0.7655 |
|
| 420 |
+
| 0.083 | 20.0 | 200 | 0.7250 | 0.7665 |
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
### Framework versions
|
| 424 |
+
|
| 425 |
+
- Transformers 4.50.3
|
| 426 |
+
- Pytorch 2.6.0+cu124
|
| 427 |
+
- Tokenizers 0.21.1
|
| 428 |
+
","{""id"": ""htkien95/ner-biomedical-maccrobat2018"", ""author"": ""htkien95"", ""sha"": ""f1412cd43115e46e0c6d35ba4d651cea3a72b518"", ""last_modified"": ""2025-04-04 07:33:34+00:00"", ""created_at"": ""2025-04-04 07:33:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""distilbert"", ""token-classification"", ""generated_from_trainer"", ""base_model:d4data/biomedical-ner-all"", ""base_model:finetune:d4data/biomedical-ner-all"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""widget_data"": [{""text"": ""My name is Wolfgang and I live in Berlin""}, {""text"": ""My name is Sarah and I live in London""}, {""text"": ""My name is Clara and I live in Berkeley, California.""}], ""model_index"": [{""name"": ""ner-biomedical-maccrobat2018"", ""results"": []}], ""config"": {""architectures"": [""DistilBertForTokenClassification""], ""model_type"": ""distilbert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr04_07-28-21_2467974bb524/events.out.tfevents.1743751702.2467974bb524.1268.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 66426707}, ""total"": 66426707}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-04 07:33:34+00:00"", ""cardData"": ""base_model: d4data/biomedical-ner-all\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ner-biomedical-maccrobat2018\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForTokenClassification"", ""custom_class"": null, ""pipeline_tag"": ""token-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67ef8b3f47959554e359d5c6"", ""modelId"": ""htkien95/ner-biomedical-maccrobat2018"", ""usedStorage"": 265744696}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=htkien95/ner-biomedical-maccrobat2018&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhtkien95%2Fner-biomedical-maccrobat2018%5D(%2Fhtkien95%2Fner-biomedical-maccrobat2018)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
blenderbot-400M-distill_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
blessed_vae_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
NoCrypt/blessed_vae,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
thumbnail: >-
|
| 6 |
+
https://s3.amazonaws.com/moonup/production/uploads/1678229171792-62de447b4dcb9177d4bd876c.png
|
| 7 |
+
tags:
|
| 8 |
+
- compvis
|
| 9 |
+
- vae
|
| 10 |
+
---
|
| 11 |
+
# Blessed Vae
|
| 12 |
+
Contrast version of the regular nai/any vae
|
| 13 |
+
|
| 14 |
+
Good for models that are low on contrast even after using said vae
|
| 15 |
+
|
| 16 |
+
There's a few VAEs in here
|
| 17 |
+
1. blessed.vae.pt : VAE from salt's example VAEs
|
| 18 |
+
2. blessed-fix.vae.pt : blessed VAE with Patch Encoder (to fix [this issue](https://github.com/sALTaccount/VAE-BlessUp/issues/1))
|
| 19 |
+
3. blessed2.vae.pt : Customly tuned by me
|
| 20 |
+
|
| 21 |
+
## Differences
|
| 22 |
+
```diff
|
| 23 |
+
- Left: AnythingVAE (funfact: it's just NAI VAE but renamed)
|
| 24 |
+
+ Right: BlessedVAE
|
| 25 |
+
```
|
| 26 |
+

|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
|
| 36 |
+
## Credits
|
| 37 |
+
- Salt for the model and script
|
| 38 |
+
- Reuploaded from https://github.com/sALTaccount/VAE-BlessUp example","{""id"": ""NoCrypt/blessed_vae"", ""author"": ""NoCrypt"", ""sha"": ""4c1fcf7d13a1361ede6b196076b8a95472f21c39"", ""last_modified"": ""2023-03-08 11:41:44+00:00"", ""created_at"": ""2023-03-07 15:58:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 188, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""compvis"", ""vae"", ""en"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\ntags:\n- compvis\n- vae\nthumbnail: https://s3.amazonaws.com/moonup/production/uploads/1678229171792-62de447b4dcb9177d4bd876c.png"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='blessed-fix.vae.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='blessed.vae.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='blessed2.vae.pt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-03-08 11:41:44+00:00"", ""cardData"": ""language:\n- en\ntags:\n- compvis\n- vae\nthumbnail: https://s3.amazonaws.com/moonup/production/uploads/1678229171792-62de447b4dcb9177d4bd876c.png"", ""transformersInfo"": null, ""_id"": ""64075f3be9e4f5a82bc05eca"", ""modelId"": ""NoCrypt/blessed_vae"", ""usedStorage"": 
1004080513}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=NoCrypt/blessed_vae&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNoCrypt%2Fblessed_vae%5D(%2FNoCrypt%2Fblessed_vae)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
blue_pencil_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
clip-vit-large-patch14_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deepseek-vl2-small_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
deepseek-ai/deepseek-vl2-small,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: deepseek
|
| 5 |
+
license_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL
|
| 6 |
+
pipeline_tag: image-text-to-text
|
| 7 |
+
library_name: transformers
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
## 1. Introduction
|
| 11 |
+
|
| 12 |
+
Introducing DeepSeek-VL2, an advanced series of large Mixture-of-Experts (MoE) Vision-Language Models that significantly improves upon its predecessor, DeepSeek-VL. DeepSeek-VL2 demonstrates superior capabilities across various tasks, including but not limited to visual question answering, optical character recognition, document/table/chart understanding, and visual grounding. Our model series is composed of three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2, with 1.0B, 2.8B and 4.5B activated parameters respectively.
|
| 13 |
+
DeepSeek-VL2 achieves competitive or state-of-the-art performance with similar or fewer activated parameters compared to existing open-source dense and MoE-based models.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
[DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding](https://arxiv.org/abs/2412.10302)
|
| 17 |
+
|
| 18 |
+
[**Github Repository**](https://github.com/deepseek-ai/DeepSeek-VL2)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
Zhiyu Wu*, Xiaokang Chen*, Zizheng Pan*, Xingchao Liu*, Wen Liu**, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, Zhenda Xie, Yu Wu, Kai Hu, Jiawei Wang, Yaofeng Sun, Yukun Li, Yishi Piao, Kang Guan, Aixin Liu, Xin Xie, Yuxiang You, Kai Dong, Xingkai Yu, Haowei Zhang, Liang Zhao, Yisong Wang, Chong Ruan*** (* Equal Contribution, ** Project Lead, *** Corresponding author)
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
### 2. Model Summary
|
| 27 |
+
|
| 28 |
+
DeepSeek-VL2-small is built on DeepSeekMoE-16B.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## 3. Quick Start
|
| 32 |
+
|
| 33 |
+
### Installation
|
| 34 |
+
|
| 35 |
+
On the basis of `Python >= 3.8` environment, install the necessary dependencies by running the following command:
|
| 36 |
+
|
| 37 |
+
```shell
|
| 38 |
+
pip install -e .
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### Notifications
|
| 42 |
+
1. We suggest to use a temperature T <= 0.7 when sampling. We observe a larger temperature decreases the generation quality.
|
| 43 |
+
2. To keep the number of tokens managable in the context window, we apply dynamic tiling strategy to <=2 images. When there are >=3 images, we directly pad the images to 384*384 as inputs without tiling.
|
| 44 |
+
3. The main difference between DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2 is the base LLM.
|
| 45 |
+
|
| 46 |
+
### Simple Inference Example
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
import torch
|
| 50 |
+
from transformers import AutoModelForCausalLM
|
| 51 |
+
|
| 52 |
+
from deepseek_vl.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
|
| 53 |
+
from deepseek_vl.utils.io import load_pil_images
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# specify the path to the model
|
| 57 |
+
model_path = ""deepseek-ai/deepseek-vl2-small""
|
| 58 |
+
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
|
| 59 |
+
tokenizer = vl_chat_processor.tokenizer
|
| 60 |
+
|
| 61 |
+
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
|
| 62 |
+
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
|
| 63 |
+
|
| 64 |
+
## single image conversation example
|
| 65 |
+
conversation = [
|
| 66 |
+
{
|
| 67 |
+
""role"": ""<|User|>"",
|
| 68 |
+
""content"": ""<image>\n<|ref|>The giraffe at the back.<|/ref|>."",
|
| 69 |
+
""images"": [""./images/visual_grounding.jpeg""],
|
| 70 |
+
},
|
| 71 |
+
{""role"": ""<|Assistant|>"", ""content"": """"},
|
| 72 |
+
]
|
| 73 |
+
|
| 74 |
+
## multiple images (or in-context learning) conversation example
|
| 75 |
+
# conversation = [
|
| 76 |
+
# {
|
| 77 |
+
# ""role"": ""User"",
|
| 78 |
+
# ""content"": ""<image_placeholder>A dog wearing nothing in the foreground, ""
|
| 79 |
+
# ""<image_placeholder>a dog wearing a santa hat, ""
|
| 80 |
+
# ""<image_placeholder>a dog wearing a wizard outfit, and ""
|
| 81 |
+
# ""<image_placeholder>what's the dog wearing?"",
|
| 82 |
+
# ""images"": [
|
| 83 |
+
# ""images/dog_a.png"",
|
| 84 |
+
# ""images/dog_b.png"",
|
| 85 |
+
# ""images/dog_c.png"",
|
| 86 |
+
# ""images/dog_d.png"",
|
| 87 |
+
# ],
|
| 88 |
+
# },
|
| 89 |
+
# {""role"": ""Assistant"", ""content"": """"}
|
| 90 |
+
# ]
|
| 91 |
+
|
| 92 |
+
# load images and prepare for inputs
|
| 93 |
+
pil_images = load_pil_images(conversation)
|
| 94 |
+
prepare_inputs = vl_chat_processor(
|
| 95 |
+
conversations=conversation,
|
| 96 |
+
images=pil_images,
|
| 97 |
+
force_batchify=True,
|
| 98 |
+
system_prompt=""""
|
| 99 |
+
).to(vl_gpt.device)
|
| 100 |
+
|
| 101 |
+
# run image encoder to get the image embeddings
|
| 102 |
+
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
|
| 103 |
+
|
| 104 |
+
# run the model to get the response
|
| 105 |
+
outputs = vl_gpt.language_model.generate(
|
| 106 |
+
inputs_embeds=inputs_embeds,
|
| 107 |
+
attention_mask=prepare_inputs.attention_mask,
|
| 108 |
+
pad_token_id=tokenizer.eos_token_id,
|
| 109 |
+
bos_token_id=tokenizer.bos_token_id,
|
| 110 |
+
eos_token_id=tokenizer.eos_token_id,
|
| 111 |
+
max_new_tokens=512,
|
| 112 |
+
do_sample=False,
|
| 113 |
+
use_cache=True
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
|
| 117 |
+
print(f""{prepare_inputs['sft_format'][0]}"", answer)
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
### Gradio Demo (TODO)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
## 4. License
|
| 124 |
+
|
| 125 |
+
This code repository is licensed under [MIT License](./LICENSE-CODE). The use of DeepSeek-VL2 models is subject to [DeepSeek Model License](./LICENSE-MODEL). DeepSeek-VL2 series supports commercial use.
|
| 126 |
+
|
| 127 |
+
## 5. Citation
|
| 128 |
+
|
| 129 |
+
```
|
| 130 |
+
@misc{wu2024deepseekvl2mixtureofexpertsvisionlanguagemodels,
|
| 131 |
+
title={DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding},
|
| 132 |
+
author={Zhiyu Wu and Xiaokang Chen and Zizheng Pan and Xingchao Liu and Wen Liu and Damai Dai and Huazuo Gao and Yiyang Ma and Chengyue Wu and Bingxuan Wang and Zhenda Xie and Yu Wu and Kai Hu and Jiawei Wang and Yaofeng Sun and Yukun Li and Yishi Piao and Kang Guan and Aixin Liu and Xin Xie and Yuxiang You and Kai Dong and Xingkai Yu and Haowei Zhang and Liang Zhao and Yisong Wang and Chong Ruan},
|
| 133 |
+
year={2024},
|
| 134 |
+
eprint={2412.10302},
|
| 135 |
+
archivePrefix={arXiv},
|
| 136 |
+
primaryClass={cs.CV},
|
| 137 |
+
url={https://arxiv.org/abs/2412.10302},
|
| 138 |
+
}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
## 6. Contact
|
| 142 |
+
|
| 143 |
+
If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).","{""id"": ""deepseek-ai/deepseek-vl2-small"", ""author"": ""deepseek-ai"", ""sha"": ""6033e16432a1d771cf9fe4a6f894ff5e5e1459af"", ""last_modified"": ""2024-12-18 08:17:59+00:00"", ""created_at"": ""2024-12-13 09:01:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 68032, ""downloads_all_time"": null, ""likes"": 157, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_vl_v2"", ""image-text-to-text"", ""conversational"", ""arxiv:2412.10302"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL\npipeline_tag: image-text-to-text"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""model_type"": ""deepseek_vl_v2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5c\u2581pad\u2581\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""deepseek-ai/deepseek-vl2-small"", ""Sarath0x8f/Document-QA-bot"", ""AskUI/DeepSeek-Vl-UI"", 
""Canstralian/deepseek-vl2-small"", ""Ronith55/OCR_deepseek-vl2"", ""rapsar/fff01"", ""JimmyK300/deepseek-vl2-small"", ""kevinbioinformatics/deepseek-vl2-small"", ""roxky/deepseek-vl2-small"", ""sailokesh/Hello_GPT"", ""zuehue/deepseek-vl2-small"", ""lli-jiaxin/DeepSeek-VL2-Run-On-Google-Colab"", ""Asya2025/TB_GenAI_Model1_DeepSeek-VL2-Small""], ""safetensors"": {""parameters"": {""BF16"": 16148349504}, ""total"": 16148349504}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-18 08:17:59+00:00"", ""cardData"": ""library_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""675bf7cf1d68b705c6817d90"", ""modelId"": ""deepseek-ai/deepseek-vl2-small"", ""usedStorage"": 32297441224}",0,,0,,0,,0,,0,"AskUI/DeepSeek-Vl-UI, Canstralian/deepseek-vl2-small, JimmyK300/deepseek-vl2-small, Ronith55/OCR_deepseek-vl2, Sarath0x8f/Document-QA-bot, deepseek-ai/deepseek-vl2-small, huggingface/InferenceSupport/discussions/623, kevinbioinformatics/deepseek-vl2-small, lli-jiaxin/DeepSeek-VL2-Run-On-Google-Colab, rapsar/fff01, roxky/deepseek-vl2-small, sailokesh/Hello_GPT, zuehue/deepseek-vl2-small",13
|
deepseek-vl2_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
deepseek-ai/deepseek-vl2,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: deepseek
|
| 5 |
+
license_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL
|
| 6 |
+
pipeline_tag: image-text-to-text
|
| 7 |
+
library_name: transformers
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
## 1. Introduction
|
| 11 |
+
|
| 12 |
+
Introducing DeepSeek-VL2, an advanced series of large Mixture-of-Experts (MoE) Vision-Language Models that significantly improves upon its predecessor, DeepSeek-VL. DeepSeek-VL2 demonstrates superior capabilities across various tasks, including but not limited to visual question answering, optical character recognition, document/table/chart understanding, and visual grounding. Our model series is composed of three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2, with 1.0B, 2.8B and 4.5B activated parameters respectively.
|
| 13 |
+
DeepSeek-VL2 achieves competitive or state-of-the-art performance with similar or fewer activated parameters compared to existing open-source dense and MoE-based models.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
[DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding](https://arxiv.org/abs/2412.10302)
|
| 17 |
+
|
| 18 |
+
[**Github Repository**](https://github.com/deepseek-ai/DeepSeek-VL2)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
Zhiyu Wu*, Xiaokang Chen*, Zizheng Pan*, Xingchao Liu*, Wen Liu**, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, Zhenda Xie, Yu Wu, Kai Hu, Jiawei Wang, Yaofeng Sun, Yukun Li, Yishi Piao, Kang Guan, Aixin Liu, Xin Xie, Yuxiang You, Kai Dong, Xingkai Yu, Haowei Zhang, Liang Zhao, Yisong Wang, Chong Ruan*** (* Equal Contribution, ** Project Lead, *** Corresponding author)
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
### 2. Model Summary
|
| 27 |
+
|
| 28 |
+
DeepSeek-VL2 is built on DeepSeekMoE-27B.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## 3. Quick Start
|
| 32 |
+
|
| 33 |
+
### Installation
|
| 34 |
+
|
| 35 |
+
On the basis of `Python >= 3.8` environment, install the necessary dependencies by running the following command:
|
| 36 |
+
|
| 37 |
+
```shell
|
| 38 |
+
pip install -e .
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### Notifications
|
| 42 |
+
1. We suggest to use a temperature T <= 0.7 when sampling. We observe a larger temperature decreases the generation quality.
|
| 43 |
+
2. To keep the number of tokens managable in the context window, we apply dynamic tiling strategy to <=2 images. When there are >=3 images, we directly pad the images to 384*384 as inputs without tiling.
|
| 44 |
+
3. The main difference between DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2 is the base LLM.
|
| 45 |
+
|
| 46 |
+
### Simple Inference Example
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
import torch
|
| 50 |
+
from transformers import AutoModelForCausalLM
|
| 51 |
+
|
| 52 |
+
from deepseek_vl.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
|
| 53 |
+
from deepseek_vl.utils.io import load_pil_images
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# specify the path to the model
|
| 57 |
+
model_path = ""deepseek-ai/deepseek-vl2-small""
|
| 58 |
+
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
|
| 59 |
+
tokenizer = vl_chat_processor.tokenizer
|
| 60 |
+
|
| 61 |
+
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
|
| 62 |
+
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
|
| 63 |
+
|
| 64 |
+
## single image conversation example
|
| 65 |
+
conversation = [
|
| 66 |
+
{
|
| 67 |
+
""role"": ""<|User|>"",
|
| 68 |
+
""content"": ""<image>\n<|ref|>The giraffe at the back.<|/ref|>."",
|
| 69 |
+
""images"": [""./images/visual_grounding.jpeg""],
|
| 70 |
+
},
|
| 71 |
+
{""role"": ""<|Assistant|>"", ""content"": """"},
|
| 72 |
+
]
|
| 73 |
+
|
| 74 |
+
## multiple images (or in-context learning) conversation example
|
| 75 |
+
# conversation = [
|
| 76 |
+
# {
|
| 77 |
+
# ""role"": ""User"",
|
| 78 |
+
# ""content"": ""<image_placeholder>A dog wearing nothing in the foreground, ""
|
| 79 |
+
# ""<image_placeholder>a dog wearing a santa hat, ""
|
| 80 |
+
# ""<image_placeholder>a dog wearing a wizard outfit, and ""
|
| 81 |
+
# ""<image_placeholder>what's the dog wearing?"",
|
| 82 |
+
# ""images"": [
|
| 83 |
+
# ""images/dog_a.png"",
|
| 84 |
+
# ""images/dog_b.png"",
|
| 85 |
+
# ""images/dog_c.png"",
|
| 86 |
+
# ""images/dog_d.png"",
|
| 87 |
+
# ],
|
| 88 |
+
# },
|
| 89 |
+
# {""role"": ""Assistant"", ""content"": """"}
|
| 90 |
+
# ]
|
| 91 |
+
|
| 92 |
+
# load images and prepare for inputs
|
| 93 |
+
pil_images = load_pil_images(conversation)
|
| 94 |
+
prepare_inputs = vl_chat_processor(
|
| 95 |
+
conversations=conversation,
|
| 96 |
+
images=pil_images,
|
| 97 |
+
force_batchify=True,
|
| 98 |
+
system_prompt=""""
|
| 99 |
+
).to(vl_gpt.device)
|
| 100 |
+
|
| 101 |
+
# run image encoder to get the image embeddings
|
| 102 |
+
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
|
| 103 |
+
|
| 104 |
+
# run the model to get the response
|
| 105 |
+
outputs = vl_gpt.language_model.generate(
|
| 106 |
+
inputs_embeds=inputs_embeds,
|
| 107 |
+
attention_mask=prepare_inputs.attention_mask,
|
| 108 |
+
pad_token_id=tokenizer.eos_token_id,
|
| 109 |
+
bos_token_id=tokenizer.bos_token_id,
|
| 110 |
+
eos_token_id=tokenizer.eos_token_id,
|
| 111 |
+
max_new_tokens=512,
|
| 112 |
+
do_sample=False,
|
| 113 |
+
use_cache=True
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
|
| 117 |
+
print(f""{prepare_inputs['sft_format'][0]}"", answer)
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
### Gradio Demo (TODO)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
## 4. License
|
| 124 |
+
|
| 125 |
+
This code repository is licensed under [MIT License](./LICENSE-CODE). The use of DeepSeek-VL2 models is subject to [DeepSeek Model License](./LICENSE-MODEL). DeepSeek-VL2 series supports commercial use.
|
| 126 |
+
|
| 127 |
+
## 5. Citation
|
| 128 |
+
|
| 129 |
+
```
|
| 130 |
+
@misc{wu2024deepseekvl2mixtureofexpertsvisionlanguagemodels,
|
| 131 |
+
title={DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding},
|
| 132 |
+
author={Zhiyu Wu and Xiaokang Chen and Zizheng Pan and Xingchao Liu and Wen Liu and Damai Dai and Huazuo Gao and Yiyang Ma and Chengyue Wu and Bingxuan Wang and Zhenda Xie and Yu Wu and Kai Hu and Jiawei Wang and Yaofeng Sun and Yukun Li and Yishi Piao and Kang Guan and Aixin Liu and Xin Xie and Yuxiang You and Kai Dong and Xingkai Yu and Haowei Zhang and Liang Zhao and Yisong Wang and Chong Ruan},
|
| 133 |
+
year={2024},
|
| 134 |
+
eprint={2412.10302},
|
| 135 |
+
archivePrefix={arXiv},
|
| 136 |
+
primaryClass={cs.CV},
|
| 137 |
+
url={https://arxiv.org/abs/2412.10302},
|
| 138 |
+
}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
## 6. Contact
|
| 142 |
+
|
| 143 |
+
If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).","{""id"": ""deepseek-ai/deepseek-vl2"", ""author"": ""deepseek-ai"", ""sha"": ""f363772d1c47f4239dd844015b4bd53beb87951b"", ""last_modified"": ""2024-12-18 08:18:21+00:00"", ""created_at"": ""2024-12-13 09:06:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7448, ""downloads_all_time"": null, ""likes"": 325, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_vl_v2"", ""image-text-to-text"", ""arxiv:2412.10302"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL\npipeline_tag: image-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""model_type"": ""deepseek_vl_v2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5c\u2581pad\u2581\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-000008.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""deepseek-ai/deepseek-vl2-small"", ""Sarath0x8f/Document-QA-bot"", ""AskUI/DeepSeek-Vl-UI"", ""Canstralian/deepseek-vl2-small"", ""BronioInt/Lake-1-Pro"", ""99i/si"", ""Nocigar/siliconflow"", ""rapsar/fff01"", ""JimmyK300/deepseek-vl2-small"", ""kevinbioinformatics/deepseek-vl2-small"", ""roxky/deepseek-vl2-small"", ""sailokesh/Hello_GPT"", ""zuehue/deepseek-vl2-small"", ""lli-jiaxin/DeepSeek-VL2-Run-On-Google-Colab"", ""Sanjeev23oct/browser-use-sg"", ""Unknown504/web-ui"", ""Asya2025/TB_GenAI_Model1_DeepSeek-VL2-Small""], ""safetensors"": {""parameters"": {""F32"": 2088, ""BF16"": 27480132160}, ""total"": 27480134248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-18 08:18:21+00:00"", ""cardData"": ""library_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL\npipeline_tag: image-text-to-text"", 
""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""675bf92424ddd1821c8b6260"", ""modelId"": ""deepseek-ai/deepseek-vl2"", ""usedStorage"": 54961192528}",0,,0,,0,https://huggingface.co/OPEA/deepseek-vl2-int4-sym-gptq-inc,1,,0,"99i/si, AskUI/DeepSeek-Vl-UI, BronioInt/Lake-1-Pro, Canstralian/deepseek-vl2-small, JimmyK300/deepseek-vl2-small, Nocigar/siliconflow, Sanjeev23oct/browser-use-sg, Sarath0x8f/Document-QA-bot, deepseek-ai/deepseek-vl2-small, huggingface/InferenceSupport/discussions/1185, rapsar/fff01, roxky/deepseek-vl2-small, zuehue/deepseek-vl2-small",13
|