Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- 4x-Ultrasharp_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +2 -0
- Annotators_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +5 -0
- Arcane-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +59 -0
- BLOOMChat-176B-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +479 -0
- Baichuan-7B_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +230 -0
- BioMistral-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- CogView4-6B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +330 -0
- ControlNet-modules-safetensors_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +4 -0
- CrisperWhisper_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +225 -0
- Cyberpunk-Anime-Diffusion_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +122 -0
- DeepSeek-Coder-V2-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +286 -0
- DucHaitenAIart_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +70 -0
- EXAONE-Deep-32B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- FalconLite_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +77 -0
- Flux-uncensored_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +116 -0
- Future-Diffusion_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +47 -0
- Hyper-SD_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +517 -0
- Inkpunk-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +23 -0
- Lag-Llama_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +100 -0
- Leffa_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +81 -0
- Llama-2-7B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +406 -0
- Llama-3-8B-Instruct-Gradient-1048k_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +161 -0
- MagicPrompt-Stable-Diffusion_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +31 -0
- Molmo-7B-O-0924_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +218 -0
- MythoMax-L2-13b_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +284 -0
- NuExtract_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +653 -0
- NuminaMath-7B-TIR_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
- OOTDiffusion_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +37 -0
- PhotoMaker_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +2 -0
- Qwen-72B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +808 -0
- Qwen2-72B-Instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
- SillyTavern-Presets_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +143 -0
- SmolVLM-256M-Instruct_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- Starling-LM-7B-alpha_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +345 -0
- Tifa-Deepsex-14b-CoT_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +290 -0
- VLM_WebSight_finetuned_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +127 -0
- bark_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
- bart-large-cnn_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
- controlnet-sd21_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +79 -0
- distil-large-v3_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
- distilgpt2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- fashion-clip_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +207 -0
- flux-controlnet-collections_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +86 -0
- gte-multilingual-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- idefics2-8b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
- ip-composition-adapter_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +71 -0
- jina-clip-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
- llama-2-ko-7b_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +640 -0
- m3e-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +236 -0
- miqu-1-70b-sf_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +0 -0
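Each CSV added here appears to describe the downstream lineage (fine-tunes, adapters, quantized variants, merges, and linked Spaces) of one base model, judging by the shared header shown in the diffs below (`model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count`). As a minimal sketch of how such a file could be consumed — assuming `pandas` is available, which this commit does not prescribe, and picking one of the file names listed above purely as an example:

```python
# Minimal sketch (assumption: pandas is installed; the CSV matches the header
# shown in the diffs below; the file name is just one of the files listed above).
import pandas as pd

path = "Arcane-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv"
df = pd.read_csv(path)  # multi-line "card" fields are quoted, so the default parser handles them

# Inspect the lineage counts recorded for the base model.
print(df[["model_id", "children_count", "adapters_count",
          "quantized_count", "merges_count", "spaces_count"]])
```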
4x-Ultrasharp_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,2 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
lokCX/4x-Ultrasharp,N/A,"{""id"": ""lokCX/4x-Ultrasharp"", ""author"": ""lokCX"", ""sha"": ""1856559b50de25116a7c07261177dd128f1f5664"", ""last_modified"": ""2023-04-07 12:32:49+00:00"", ""created_at"": ""2023-04-07 12:15:33+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 158, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='4x-UltraSharp.pth', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eienmojiki/AnyDiffuse""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-04-07 12:32:49+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""6430096543a53c86b3fcb2a0"", ""modelId"": ""lokCX/4x-Ultrasharp"", ""usedStorage"": 66961958}",0,,0,,0,,0,,0,"eienmojiki/AnyDiffuse, huggingface/InferenceSupport/discussions/new?title=lokCX/4x-Ultrasharp&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BlokCX%2F4x-Ultrasharp%5D(%2FlokCX%2F4x-Ultrasharp)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
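The `metadata` column embeds the model's full Hub record as a JSON string (see the row above), so per-model details such as `likes`, `tags`, or the linked `spaces` can be recovered by decoding that field. A hedged sketch under the same assumptions as the previous example (pandas installed, file name taken from the list above):

```python
# Sketch only: decode the JSON stored in the "metadata" column of the first row.
import json
import pandas as pd

df = pd.read_csv("4x-Ultrasharp_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv")
record = json.loads(df.loc[0, "metadata"])  # the metadata cell holds the Hub record as JSON
print(record["id"], record["likes"], record["spaces"])
```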
Annotators_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
@@ -0,0 +1,5 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
lllyasviel/Annotators,"---
license: other
---
","{""id"": ""lllyasviel/Annotators"", ""author"": ""lllyasviel"", ""sha"": ""982e7edaec38759d914a963c48c4726685de7d96"", ""last_modified"": ""2023-08-27 01:01:06+00:00"", ""created_at"": ""2023-03-14 15:30:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 340, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='150_16_swin_l_oneformer_coco_100ep.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='250_16_swin_l_oneformer_ade20k_160k.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ControlNetHED.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ControlNetLama.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='RealESRGAN_x4plus.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ZoeD_M12_N.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='body_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='clip_g.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='dpt_hybrid-midas-501f0c75.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='erika.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='facenet.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hand_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lama.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest_net_G.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mlsd_large_512_fp32.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='netG.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='network-bsds500.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='res101.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scannet.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sk_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sk_model2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='table5_pidinet.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='upernet_global_small.pth', size=None, blob_id=None, lfs=None)""], ""spaces"": [""hysts/ControlNet-v1-1"", ""okaris/omni-zero"", ""Anonymous-sub/Rerender"", ""TencentARC/T2I-Adapter-SDXL"", ""roubaofeipi/UltraPixel-demo"", ""linoyts/scribble-sdxl-flash"", ""shgao/EditAnything"", ""Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro"", ""KBlueLeaf/Sketch-Gen"", ""shi-labs/Prompt-Free-Diffusion"", ""Koi953215/NaRCan_demo"", ""ozgurkara/RAVE"", ""limingcv/ControlNet-Plus-Plus"", ""linoyts/scribble-sdxl"", ""openfree/ultpixgen"", ""fffiloni/AniDoc"", ""broyang/anime-ai"", ""feishen29/IMAGDressing-v1"", ""Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0"", ""fffiloni/MangaNinja-demo"", ""guardiancc/flux-advanced-explorer"", ""Kwai-Kolors/Kolors-Controlnet_and_IPA"", ""mikonvergence/mask-and-sketch"", ""broyang/interior-ai-designer"", 
""vilarin/Diffutoon-ExVideo"", ""Deadmon/union-sdxl"", ""okaris/omni-zero-couples"", ""Kwai-Kolors/Kolors-Controlnet"", ""Sebastiankay/controlnet-and-ip-adapter-plus"", ""Superlang/ImageProcessor"", ""Robert001/UniControl-Demo"", ""dreamer-technoland/object-to-object-replace"", ""SakanaAI/Evo-Nishikie"", ""coreml-community/ControlNet-v1-1-Annotators-cpu"", ""Deadmon/scribble-pony-sdxl"", ""fantos/flxcontrol"", ""rupeshs/fastsdcpu"", ""Aatricks/LightDiffusion-Next"", ""OmPrakashSingh1704/ADVERTISE"", ""abhishek/sketch-to-image"", ""ameerazam08/Stable-Cascade-Super-Resolution"", ""fantos/EveryText"", ""wondervictor/ControlAR"", ""Make-A-Protagonist/Make-A-Protagonist-inference"", ""DMTuit/Diffusion-API"", ""jiuface/flux-controlnet-inpainting"", ""nowsyn/StyleShot"", ""stazizov/XFluxSpace"", ""PKUWilliamYang/FRESCO"", ""kevinwang676/ControlNet-with-GPT-4"", ""ilanser/Sketch-Stable-Diffusion"", ""egg22314/object-to-object-replace"", ""dreamer-technoland/object-to-object-replace-1"", ""HighCWu/control-lora-v3"", ""smartfeed/image2image"", ""willgibs/ControlNet-v1-1"", ""fantos/kolcontrl"", ""cbensimon/omni-zero-couples"", ""nuwandaa/StyleShot"", ""atatakun/ControlNet-v1-1-Annotators-cpu"", ""mikelalda/Drawing-Diffusion"", ""TheNetherWatcher/Vid2Vid-using-Text-prompt"", ""kevinwang676/Diffutoon"", ""K00B404/AnimateDiff-Lightning"", ""Gainward777/Sketcher"", ""dezzman/diffusion_models"", ""RobLi/ControlNet-v1-1"", ""barani/ControlNet"", ""cyberoleg/b2719240e190e2a649150d94db50be82838efeb0"", ""YEsssssssssssir/AIgen"", ""adorp/ControlNet-v1-1-duplicate"", ""flatindo/generate2"", ""flatindo/Image-Diffusion-WebUI"", ""kottu/stabble_diffusion_sketch"", ""killah-t-cell/EditAnything"", ""lloki/scribble-sdxl-flash"", ""itsVilen/trail"", ""Sebastiankay/controlnet-preprocessors-only"", ""terrapretapermaculture/ControlNet-v1-1-B"", ""smartfeed/turbo_fm"", ""jiuface/flux-controlnet-inpainting-large"", ""jordandotzel/ControlNet-v1-1"", ""svjack/AniDoc"", ""tianaco/tianacomakethedot"", ""anushriiyer/IMAGDressing-v1"", ""tejani/Another"", ""otmanheddouch/house_design"", ""VCHENZIJIA/Stable-Diffusion-ControlNet-WebUI"", ""bastibond/ControlNet-v1-1"", ""licasexko/ControlNet-v1-1"", ""serhatderya/ControlNet-v1-1-scribble"", ""imjunaidafzal/ControlNet-v1-1"", ""xawery/ControlNet-v1-1"", ""rishavraina/ControlNet-v1-1"", ""dddddahling/ControlNet-v1-1"", ""xing2024/ControlNet-v1-1"", ""lollo21/will-video-editor"", ""wesbuskirk/create"", ""777Andy777/ControlNet-v1-1"", ""wimvanhenden/ControlNet-v1-1""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-27 01:01:06+00:00"", ""cardData"": ""license: other"", ""transformersInfo"": null, ""_id"": ""641093072a593afb553ee805"", ""modelId"": ""lllyasviel/Annotators"", ""usedStorage"": 12718642304}",0,,0,,0,,0,,0,"KBlueLeaf/Sketch-Gen, Koi953215/NaRCan_demo, Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro, broyang/anime-ai, huggingface/InferenceSupport/discussions/new?title=lllyasviel/Annotators&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blllyasviel%2FAnnotators%5D(%2Flllyasviel%2FAnnotators)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, hysts/ControlNet-v1-1, limingcv/ControlNet-Plus-Plus, linoyts/scribble-sdxl, linoyts/scribble-sdxl-flash, okaris/omni-zero, openfree/ultpixgen, ozgurkara/RAVE, roubaofeipi/UltraPixel-demo",13
Arcane-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
@@ -0,0 +1,59 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
nitrosocke/Arcane-Diffusion,"---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---
# Arcane Diffusion
This is the fine-tuned Stable Diffusion model trained on images from the TV Show Arcane.
Use the tokens **_arcane style_** in your prompts for the effect.

**If you enjoy my work, please consider supporting me**
[](https://patreon.com/user?u=79196446)

### 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information,
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).

You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().

```python
#!pip install diffusers transformers scipy torch
from diffusers import StableDiffusionPipeline
import torch

model_id = ""nitrosocke/Arcane-Diffusion""
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to(""cuda"")

prompt = ""arcane style, a magical princess with golden hair""
image = pipe(prompt).images[0]

image.save(""./magical_princess.png"")
```

# Gradio & Colab

We also support a [Gradio](https://github.com/gradio-app/gradio) Web UI and Colab with Diffusers to run fine-tuned Stable Diffusion models:
[](https://huggingface.co/spaces/anzorq/finetuned_diffusion)
[](https://colab.research.google.com/drive/1j5YvfMZoGdDGdj3O3xRU1m4ujKYsElZO?usp=sharing)



### Sample images from v3:


### Sample images from the model:

### Sample images used for training:


**Version 3** (arcane-diffusion-v3): This version uses the new _train-text-encoder_ setting and improves the quality and edibility of the model immensely. Trained on 95 images from the show in 8000 steps.

**Version 2** (arcane-diffusion-v2): This uses the diffusers based dreambooth training and prior-preservation loss is way more effective. The diffusers where then converted with a script to a ckpt file in order to work with automatics repo.
Training was done with 5k steps for a direct comparison to v1 and results show that it needs more steps for a more prominent result. Version 3 will be tested with 11k steps.

**Version 1** (arcane-diffusion-5k): This model was trained using _Unfrozen Model Textual Inversion_ utilizing the _Training with prior-preservation loss_ methods. There is still a slight shift towards the style, while not using the arcane token.
","{""id"": ""nitrosocke/Arcane-Diffusion"", ""author"": ""nitrosocke"", ""sha"": ""c7d9af168e4885816a62e50f2c5dfb38419f0cb3"", ""last_modified"": ""2023-05-16 09:20:36+00:00"", ""created_at"": ""2022-10-02 11:41:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2448, ""downloads_all_time"": null, ""likes"": 753, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-diffusion-5k.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-diffusion-output-images.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-diffusion-training-images.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-diffusion-v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-diffusion-v3.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-v3-samples-01.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='arcane-v3-samples-02.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='magical_princess.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""anzorq/finetuned_diffusion"", ""darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui"", ""Yntec/ToyWorld"", ""darkstorm2150/protogen-web-ui"", ""akhaliq/anything-v3.0"", ""Yntec/PrintingPress"", ""vorstcavry/ai"", 
""kamiyamai/stable-diffusion-webui"", ""yangheng/Super-Resolution-Anime-Diffusion"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""akhaliq/openjourney"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""BilalSardar/Text-To-image-AllModels"", ""AdamOswald1/finetuned_diffusion"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""diffusionai/ImgGenerator"", ""IAmXenos21/stable-diffusion-webui-VORST2"", ""John6666/PrintingPress4"", ""dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion"", ""Nickhilearla135095/maximum_diffusion"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""Rifd/ngees_doang"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Joeythemonster/Text-To-image-AllModels"", ""Evel/Evel_Space"", ""luisrguerra/sd-real-dream-lcm-cpu"", ""Daniela-C/6x_Image_diffusion"", ""riccardogiorato/playground_diffusion"", ""Dao3/Text-To-image-AllModels"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""ConceptArtHouse/webui-gameasset"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""YeOldHermit/StableDiffusion_AnythingV3_ModelCamenduru"", ""zwv9/webui-cpu"", ""duchaba/sd_prompt_helper"", ""kaleidoskop-hug/PrintingPress"", ""Adam111/stable-diffusion-webui"", ""vs4vijay/stable-diffusion"", ""Yasu55/stable-diffusion-webui"", ""ennov8ion/stablediffusion-models"", ""Shocky/Pink-Anime"", ""JoPmt/Multi-SD_Cntrl_Cny_Pse_Img2Img"", ""JoPmt/Img2Img_SD_Control_Canny_Pose_Multi"", ""ReiPlush64/finetuned_diffusion"", ""John6666/ToyWorld4"", ""akhaliq/EimisAnimeDiffusion_1.0v"", ""sasaro/webui"", ""YeOldHermit/Super-Resolution-Anime-Diffusion"", ""Omnibus-archive/Diffusion-Flood"", ""Crossper6/stable-diffusion-webui"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""TopdeckingLands/Diffusion_Space"", ""Malifex/CPU-Anything-V3.0-WebUI"", ""lianzhou/stable-diffusion-webui"", ""Missinginaction/stablediffusionwithnofilter"", ""arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", ""thestasi/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", ""achyuth1344/stable-diffusion-webui"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""Nymbo/Game-Creator"", ""Minecraft3193092/Stable-Diffusion-8"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""deaf1296/finetuned_diffusion"", ""fkunn1326/CoolJapaneseDiffusion"", ""mgxwrites/Mgx-Diffusion-v3.0"", ""pieeetre/stable-diffusion-webui"", ""luluneko1/stable-diffusion-webui"", ""Lyra121/finetuned_diffusion"", ""voltcutter/stable-diffusion-webui"", ""Mileena/anything-v3.0"", ""hylee/finetuned_diffusion"", ""Dao3/Top-20-Models"", ""Jackflack09/diffuse-custom"", ""SHOOL45/ImgGen"", ""ichsanbhrd/ImgGenerator"", ""RhythmRemix14/PrintingPressDx"", ""Minecraft3193092/Stable-Diffusion-7"", ""Omnibus/game-test""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-16 09:20:36+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image"", ""transformersInfo"": null, ""_id"": ""633978e737faf14d3a94bec7"", ""modelId"": ""nitrosocke/Arcane-Diffusion"", 
""usedStorage"": 41619195807}",0,,0,,0,,0,,0,"IAmXenos21/stable-diffusion-webui-VORST2, John6666/Diffusion80XX4sg, John6666/PrintingPress4, Nymbo/image_gen_supaqueue, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, anzorq/finetuned_diffusion, darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui, diffusionai/ImgGenerator, huggingface/InferenceSupport/discussions/new?title=nitrosocke/Arcane-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnitrosocke%2FArcane-Diffusion%5D(%2Fnitrosocke%2FArcane-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld, vorstcavry/ai, yangheng/Super-Resolution-Anime-Diffusion",14
BLOOMChat-176B-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
@@ -0,0 +1,479 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
sambanovasystems/BLOOMChat-176B-v1,"---
|
| 3 |
+
# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
|
| 4 |
+
# Doc / guide: https://huggingface.co/docs/hub/model-cards
|
| 5 |
+
license: other
|
| 6 |
+
inference: false
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
# BLOOMChat V1.0
|
| 10 |
+
|
| 11 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 12 |
+
|
| 13 |
+
BLOOMChat is a 176 billion parameter multilingual chat model. It is instruction tuned from [BLOOM (176B)](https://huggingface.co/bigscience/bloom) on assistant-style conversation datasets and supports conversation, question answering and generative answers in multiple languages.
|
| 14 |
+
|
| 15 |
+
## Model Details
|
| 16 |
+
|
| 17 |
+
### Model Description
|
| 18 |
+
|
| 19 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 20 |
+
|
| 21 |
+
- **Developed by:** [SambaNova Systems](https://sambanova.ai/)
|
| 22 |
+
- **Co-developed by:** [Together Computer](https://www.together.xyz/)
|
| 23 |
+
- **Model type:** Language Model
|
| 24 |
+
- **Language(s):** Multiple; see [training data from BLOOM](https://huggingface.co/bigscience/bloom#training-data)
|
| 25 |
+
- **License:** BLOOMChat-176B LICENSE v1.0
|
| 26 |
+
- **Instruction Tuned from model:** [BigScience Group BLOOM](https://huggingface.co/bigscience/bloom)
|
| 27 |
+
|
| 28 |
+
### Basic Information
|
| 29 |
+
|
| 30 |
+
<!-- Provide the basic links for the model. -->
|
| 31 |
+
- **Blog Post**: [Link](https://sambanova.ai/blog/introducing-bloomchat-176b-the-multilingual-chat-based-llm/)
|
| 32 |
+
- **Discord**: [Link](https://discord.com/invite/8z2Pe7cpRv)
|
| 33 |
+
- **HF Hosting**: [Chat with me!](https://huggingface.co/spaces/sambanovasystems/BLOOMChat)
|
| 34 |
+
- **Github**: [Link](https://github.com/sambanova/bloomchat)
|
| 35 |
+
|
| 36 |
+
### Licensing
|
| 37 |
+
|
| 38 |
+
To increase accessibility and to support the open-source community, SambaNova is releasing BLOOMChat under a modified version of the Apache 2.0 license, which includes use-based restrictions from BLOOM’s RAIL license. While use-based restrictions are necessarily passed through, there are no blanket restrictions on reuse, distribution, commercialization or adaptation. [Please review SambaNova’s BLOOMChat-176B License](LICENSE)
|
| 39 |
+
|
| 40 |
+
## Uses
|
| 41 |
+
<details>
|
| 42 |
+
<summary>Click to expand</summary>
|
| 43 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 44 |
+
|
| 45 |
+
### Direct Use
|
| 46 |
+
|
| 47 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 48 |
+
This model is intended for commercial and research use.
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
### Out-of-Scope Use
|
| 52 |
+
|
| 53 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
BLOOMChat should NOT be used for:
|
| 57 |
+
|
| 58 |
+
- Mission-critical applications
|
| 59 |
+
- Applications that involve the safety of others
|
| 60 |
+
- Making highly important decisions
|
| 61 |
+
- Important automated pipelines
|
| 62 |
+
|
| 63 |
+
This model is still in early development and can be prone to mistakes and hallucinations, there is still room for improvement. This model is intended to provide the community with a multilingual chat LLM baseline.
|
| 64 |
+
|
| 65 |
+
### Recommendations
|
| 66 |
+
|
| 67 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 68 |
+
|
| 69 |
+
Users should be made aware of the risks, biases, limitations, and restrictions of the model, which are listed down at the bottom of the page.
|
| 70 |
+
|
| 71 |
+
</details>
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
---
|
| 75 |
+
## How to Get Started with the Model
|
| 76 |
+
|
| 77 |
+
<details>
|
| 78 |
+
<summary>Click to expand</summary>
|
| 79 |
+
|
| 80 |
+
### Loading in model with Huggingface
|
| 81 |
+
|
| 82 |
+
```python
|
| 83 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 84 |
+
|
| 85 |
+
tokenizer = AutoTokenizer.from_pretrained(""sambanovasystems/BLOOMChat-176B-v1"")
|
| 86 |
+
model = AutoModelForCausalLM.from_pretrained(""sambanovasystems/BLOOMChat-176B-v1"", device_map=""auto"", torch_dtype=""auto"")
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
### Quick Start Inference on SambaNova's in-house Reconfigurable Dataflow Unit (RDU)
|
| 90 |
+
|
| 91 |
+
The inference code to run the model can be found our [github repo](https://github.com/sambanova/bloomchat/blob/main/rdu_quick_start/inference.py). This code requires the [SambaFlow](https://docs.sambanova.ai/developer/latest/sambaflow-intro.html) SDK to execute. For those interested in running models on RDUs, [please feel free to get in touch](https://sambanova.ai/getstarted).
|
| 92 |
+
|
| 93 |
+
### Quick Start Inference on GPU
|
| 94 |
+
|
| 95 |
+
First create a python virtual environment for these packages
|
| 96 |
+
|
| 97 |
+
```
|
| 98 |
+
python3 -m venv bloomchat_venv
|
| 99 |
+
source bloomchat_venv/bin/activate
|
| 100 |
+
pip install --upgrade pip
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
<!-- Please follow this section [Inference solutions for BLOOM 176B](https://github.com/huggingface/transformers-bloom-inference#bloom-inference-via-command-line) in the Huggingface Tutorial for environment set up and stop before the [BLOOM inference via command-line
|
| 104 |
+
](https://github.com/huggingface/transformers-bloom-inference#bloom-inference-via-command-line) section. -->
|
| 105 |
+
|
| 106 |
+
```
|
| 107 |
+
pip install flask flask_api gunicorn pydantic accelerate huggingface_hub>=0.9.0 deepspeed>=0.7.3 deepspeed-mii==0.0.2
|
| 108 |
+
```
|
| 109 |
+
And then
|
| 110 |
+
```
|
| 111 |
+
pip install transformers==4.27.0
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
You will see messages like this
|
| 115 |
+
```
|
| 116 |
+
ERROR: deepspeed-mii 0.0.2 has requirement transformers==4.21.2, but you'll have transformers 4.27.0 which is incompatible.
|
| 117 |
+
Installing collected packages: transformers
|
| 118 |
+
Found existing installation: transformers 4.21.2
|
| 119 |
+
Uninstalling transformers-4.21.2:
|
| 120 |
+
Successfully uninstalled transformers-4.21.2
|
| 121 |
+
Successfully installed transformers-4.27.0
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
Now let's git clone the [huggingface/transformers-bloom-inference](https://github.com/huggingface/transformers-bloom-inference) repo.
|
| 125 |
+
```
|
| 126 |
+
git clone https://github.com/huggingface/transformers-bloom-inference.git
|
| 127 |
+
cd transformers-bloom-inference/
|
| 128 |
+
```
|
| 129 |
+
And then you need to modify two files in this [transformers-bloom-inference](https://github.com/huggingface/transformers-bloom-inference) repo:
|
| 130 |
+
|
| 131 |
+
- Modifying `inference_server/models/hf_accelerate.py`
|
| 132 |
+
- This is because for our testing of this repo we used 4 80GB A100 GPUs and would run into memory issues
|
| 133 |
+
- Modifying `inference_server/cli.py`
|
| 134 |
+
- This is because the model was trained using specific human, bot tags
|
| 135 |
+
- Trailing spaces may lead to subpar performance
|
| 136 |
+
|
| 137 |
+
Modifications for `inference_server/models/hf_accelerate.py`:
|
| 138 |
+
|
| 139 |
+
```diff
|
| 140 |
+
diff --git a/inference_server/models/hf_accelerate.py b/inference_server/models/hf_accelerate.py
|
| 141 |
+
index 9be3c3f..a8ecb1d 100644
|
| 142 |
+
--- a/inference_server/models/hf_accelerate.py
|
| 143 |
+
+++ b/inference_server/models/hf_accelerate.py
|
| 144 |
+
@@ -1,4 +1,5 @@
|
| 145 |
+
from argparse import Namespace
|
| 146 |
+
+from accelerate.utils.modeling import get_max_memory
|
| 147 |
+
|
| 148 |
+
import torch
|
| 149 |
+
|
| 150 |
+
@@ -12,6 +13,12 @@ class HFAccelerateModel(Model):
|
| 151 |
+
|
| 152 |
+
kwargs = {""pretrained_model_name_or_path"": args.model_name, ""device_map"": ""auto""}
|
| 153 |
+
|
| 154 |
+
+ original_max_memory_dict = get_max_memory()
|
| 155 |
+
+
|
| 156 |
+
+ reduce_max_memory_dict = {device_key: int(original_max_memory_dict[device_key] * 0.85) for device_key in original_max_memory_dict}
|
| 157 |
+
+
|
| 158 |
+
+ kwargs[""max_memory""] = reduce_max_memory_dict
|
| 159 |
+
+
|
| 160 |
+
if get_world_size() > 1:
|
| 161 |
+
kwargs[""device_map""] = ""balanced_low_0""
|
| 162 |
+
|
| 163 |
+
```
|
| 164 |
+
|
| 165 |
+
Modifications for `inference_server/cli.py`:
|
| 166 |
+
|
| 167 |
+
```diff
|
| 168 |
+
diff --git a/inference_server/cli.py b/inference_server/cli.py
|
| 169 |
+
index fc903d5..5450236 100644
|
| 170 |
+
--- a/inference_server/cli.py
|
| 171 |
+
+++ b/inference_server/cli.py
|
| 172 |
+
@@ -22,6 +22,9 @@ def main() -> None:
|
| 173 |
+
while True:
|
| 174 |
+
input_text = input(""Input text: "")
|
| 175 |
+
|
| 176 |
+
+ input_text = input_text.strip()
|
| 177 |
+
+ modified_input_text = f""<human>: {input_text}\n<bot>:""
|
| 178 |
+
+
|
| 179 |
+
if input(""change generate_kwargs? [y/n] "") == ""y"":
|
| 180 |
+
while True:
|
| 181 |
+
try:
|
| 182 |
+
@@ -33,7 +36,7 @@ def main() -> None:
|
| 183 |
+
print(""message ="", e_message)
|
| 184 |
+
continue
|
| 185 |
+
|
| 186 |
+
- response = model.generate(text=[input_text], generate_kwargs=generate_kwargs)
|
| 187 |
+
+ response = model.generate(text=[modified_input_text], generate_kwargs=generate_kwargs)
|
| 188 |
+
|
| 189 |
+
print_rank_0(""Output text:"", response.text[0])
|
| 190 |
+
print_rank_0(""Generated tokens:"", response.num_generated_tokens[0])
|
| 191 |
+
|
| 192 |
+
```
|
| 193 |
+
And now you are good to go!
|
| 194 |
+
|
| 195 |
+
Running command for bf16, NO sampling
|
| 196 |
+
```
|
| 197 |
+
python -m inference_server.cli --model_name sambanovasystems/BLOOMChat-176B-v1 --model_class AutoModelForCausalLM --dtype bf16 --deployment_framework hf_accelerate --generate_kwargs '{""do_sample"": false, ""max_new_tokens"": 512}'
|
| 198 |
+
```
|
| 199 |
+
Running command for bf16, YES sampling
|
| 200 |
+
```
|
| 201 |
+
python -m inference_server.cli --model_name sambanovasystems/BLOOMChat-176B-v1 --model_class AutoModelForCausalLM --dtype bf16 --deployment_framework hf_accelerate --generate_kwargs '{""do_sample"": true, ""temperature"": 0.8, ""repetition_penalty"": 1.2, ""top_p"": 0.9, ""max_new_tokens"": 512}'
|
| 202 |
+
```
|
| 203 |
+
---
|
| 204 |
+
Running command for int8 (sub optimal performance, but fast inference time) NO sampling:
|
| 205 |
+
```
|
| 206 |
+
python -m inference_server.cli --model_name sambanovasystems/BLOOMChat-176B-v1 --model_class AutoModelForCausalLM --dtype int8 --deployment_framework hf_accelerate --generate_kwargs '{""do_sample"": false, ""max_new_tokens"": 512}'
|
| 207 |
+
```
|
| 208 |
+
Running command for int8 (sub optimal performance, but fast inference time) YES sampling:
|
| 209 |
+
```
|
| 210 |
+
python -m inference_server.cli --model_name sambanovasystems/BLOOMChat-176B-v1 --model_class AutoModelForCausalLM --dtype int8 --deployment_framework hf_accelerate --generate_kwargs '{""do_sample"": true, ""temperature"": 0.8, ""repetition_penalty"": 1.2, ""top_p"": 0.9, ""max_new_tokens"": 512}'
|
| 211 |
+
```
|
| 212 |
+
|
| 213 |
+
**DISCLAIMER:** When using int8, the results will be subpar compared to bf16 as the model is being [quantized](https://huggingface.co/blog/hf-bitsandbytes-integration#introduction-to-model-quantization).
|
| 214 |
+
|
| 215 |
+
### Suggested Inference Parameters
|
| 216 |
+
- Temperature: 0.8
|
| 217 |
+
- Repetition penalty: 1.2
|
| 218 |
+
- Top-p: 0.9
|
| 219 |
+
- Max generated tokens: 512
|
| 220 |
+
|
| 221 |
+
### Suggested Prompts To Try in GPU Tutorial
|
| 222 |
+
```
|
| 223 |
+
Input text: Write a script in which Bob accidentally breaks his dad's guitar
|
| 224 |
+
```
|
| 225 |
+
|
| 226 |
+
```
|
| 227 |
+
Input text: Create an itemized list of tasks to complete to start a clothing brand
|
| 228 |
+
```
|
| 229 |
+
|
| 230 |
+
```
|
| 231 |
+
Input text: 十七岁的风是什么颜色的?
|
| 232 |
+
```
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
</details>
|
| 236 |
+
|
| 237 |
+
---
|
| 238 |
+
|
| 239 |
+
## Some example completions for English
|
| 240 |
+
|
| 241 |
+
<details>
|
| 242 |
+
<summary>Click to expand</summary>
|
| 243 |
+
|
| 244 |
+
```
|
| 245 |
+
<human>: What does it mean for the Fed to raise or lower interest rates?
|
| 246 |
+
<bot>: The Federal Reserve (also known as the Fed) is the central banking system of the United States. It was created in 1913, and is an independent agency of the federal government. The Fed has a dual mandate of maximizing employment and stabilizing prices. The Fed sets interest rates, and can influence the money supply and the credit markets through its actions.
|
| 247 |
+
|
| 248 |
+
Raising interest rates means that the Fed is trying to reduce the money supply in the economy, and is trying to discourage borrowing and encourage saving. Lowering interest rates means that the Fed is trying to stimulate economic growth by encouraging borrowing and reducing the cost of credit.
|
| 249 |
+
|
| 250 |
+
The Fed raises or lowers interest rates in order to influence the economy and achieve its goals for employment and price stability. Interest rates are one of the primary tools that the Fed uses to achieve its goals.
|
| 251 |
+
```
|
| 252 |
+
|
| 253 |
+
```
|
| 254 |
+
<human>: Write an engaging and well-written property listing description for selling a house. Address of the house and some of the details are given to you. Fill in the information gap with hallucinations if needed.
|
| 255 |
+
|
| 256 |
+
Property Address: 412 Monterey Ave, Capitola, CA 95010
|
| 257 |
+
Parking: 3 cars + a finished single car garage
|
| 258 |
+
Details:
|
| 259 |
+
- open floorplan
|
| 260 |
+
- window with views to park/mountains
|
| 261 |
+
- Kitchen with shaker cabinetry and white Calcatta quartz counters
|
| 262 |
+
<bot>: This home at 412 Monterey Ave, Capitola, CA 95010 is the perfect place to call home. With its open floor plan, it provides the perfect space for entertaining guests. The kitchen is equipped with shaker cabinetry and white Calcatta quartz counters, providing the perfect space to create culinary masterpieces. The window with views to the park and mountains provide a beautiful view of the outdoors, making this home feel like a piece of paradise. The three-car garage and finished single car garage provide ample parking space, making this home a must-see for anyone looking for a new home.
|
| 263 |
+
|
| 264 |
+
```
|
| 265 |
+
|
| 266 |
+
```
|
| 267 |
+
<human>: What is artificial intelligence? Explain it so an 8 year old will understand.
|
| 268 |
+
<bot>: Artificial Intelligence (AI) is a computer program or machine that can do things that would normally require human intelligence, such as recognizing patterns, making decisions, and learning from experience. AI can be found in many places, such as self-driving cars, smartphones, and even robots. AI is used to make things easier for people, like when your smartphone can translate text into another language or when your voice-controlled speaker can play music for you.
|
| 269 |
+
AI works by using algorithms and big data to learn and improve over time. For example, self-driving cars use AI to analyze the environment around them and make decisions about how to drive. The car gathers data from its sensors, processes it using algorithms, and adjusts its behavior accordingly. This process is called machine learning.
|
| 270 |
+
AI is still a developing technology, and there are still many things that AI programs cannot do yet. However, as AI continues to evolve, it is likely that it will become more and more capable of performing tasks that were once thought to require human intelligence.
|
| 271 |
+
|
| 272 |
+
```
|
| 273 |
+
|
| 274 |
+
</details>
|
| 275 |
+
|
| 276 |
+
---
|
| 277 |
+
|
| 278 |
+
## Some example completions for Multilingual
|
| 279 |
+
<details>
|
| 280 |
+
<summary>Click to expand</summary>
|
| 281 |
+
|
| 282 |
+
#### Arabic
|
| 283 |
+
|
| 284 |
+
```
|
| 285 |
+
<human>: هل العمل نصف دوام فكرة جيدة لمراهقين بين عمر ١٥ و ١٩ عام
|
| 286 |
+
<bot>: نعم، يمكن أن تكون فكرة جيدة لبعض المراهقين في هذا العمر. هناك العديد من الفوائد التي قد يحققها المراهقون إذا عملوا بنصف دوام:
|
| 287 |
+
|
| 288 |
+
- يساعدهم على تعلم المسؤولية وإدارة الوقت: عندما يعمل المراهق بنصف دوام، فإنه يتعلم كيفية إدارة وقته بشكل فعال وكيفية تقسيم ساعات عمله إلى مهام أصغر.
|
| 289 |
+
- يحسن مهارات التواصل والعمل الجماعي: غالبًا ما يكون مكان العمل بيئة متعددة الثقافات حيث يلتقي المراهقون بمجموعة متنوعة من الأشخاص ويتعلمون كيفية التعاون معهم وتبادل الأفكار.
|
| 290 |
+
- يوفر فرصة للتعرف على عالم العمل: يتيح العمل للمراهق التعرف على عالم العمل واكتساب خبرة عملية قيمة ستساعده لاحقًا عند البحث عن وظيفة أو بدء حياته المهنية.
|
| 291 |
+
- يعزز الشعور بالمسؤولية والثقة بالنفس: عندما يعمل المراهق بنصف دوام ويحصل على دخل خاص به، فإنه يكتسب شعورًا أكبر بالمسؤولية والثقة بالنفس.
|
| 292 |
+
|
| 293 |
+
ومع ذلك، لا ينبغي إجبار المراهقين على العمل بنصف دوام دون موافقتهم المسبقة. يجب أيضًا مراعاة أعمارهم وقدراتهم البدنية والعقلية قبل توظيفهم.
|
| 294 |
+
```
|
| 295 |
+
|
| 296 |
+
**DISCLAIMER**: Text ordering may be off for Arabic due to code block not being able to render
|
| 297 |
+
#### Chinese
|
| 298 |
+
|
| 299 |
+
```
|
| 300 |
+
<human>: 请向我推荐一部周末看的电影并说明原因。
|
| 301 |
+
<bot>: “强烈向您推荐《美丽人生》这部影片。该片由罗伯托·贝尼尼自导自演,讲述了二战时期一对犹太父子被送进了纳粹集中营,父亲用自己的想象力为儿子编织了一个美好童真的世界,让儿子的心灵没有受到丝毫伤害的故事。
|
| 302 |
+
|
| 303 |
+
该片被誉为“最伟大的战争片”、“人类有史以来最佳影片之一”,豆瓣评分高达9.6分。它不仅是一部感人的亲情片,更是一部富含哲理的励志片和人生启示录。片中父亲的乐观豁达、幽默风趣的人生态度,对任何事都抱有积极热情的态度,即使在残酷无比的集中营里也是如此,深深地感染着观众。同时,这也是一部关于爱与希望的电影,让我们更加珍惜当下所拥有的一切,学会如何去爱身边的每一个人。
|
| 304 |
+
|
| 305 |
+
总之,这是一部值得一看再看的好电影,相信您一定会喜欢的!
|
| 306 |
+
|
| 307 |
+
```
|
| 308 |
+
|
| 309 |
+
#### French
|
| 310 |
+
|
| 311 |
+
```
|
| 312 |
+
<human>: Il y a t’il un petit-déjeuner de 700-1000 calories sans oeuf mais qui inclut des protéines.
|
| 313 |
+
<bot>: Oui, voici une recette de petit déjeuner à base d'avoine et de fromage blanc :
|
| 314 |
+
Ingrédients :
|
| 315 |
+
- 1 tasse (250 ml) de lait d'amande ou autre lait végétal
|
| 316 |
+
- 2 cuillères à soupe bombées (30 g) de flocons d'avoine
|
| 317 |
+
- 1/2 banane mûre écrasée
|
| 318 |
+
- 1/4 de pomme fraîche épluchée et coupée en dés
|
| 319 |
+
- 1 cuillère à soupe de beurre d'amande ou autre purée d'oléagineux
|
| 320 |
+
- 1 cuillère à café de cannelle moulue
|
| 321 |
+
- 1 pincée de sel
|
| 322 |
+
Préparation :
|
| 323 |
+
1. Dans une petite casserole, faites chauffer le lait jusqu'à ce qu'il soit chaud.
|
| 324 |
+
2. Ajoutez les flocons d'avoine, la banane, la pomme, le beurre d'amande, la cannelle et le sel dans le bol du mixeur et mélangez bien pour obtenir une pâte homogène.
|
| 325 |
+
3. Versez le mélange dans le lait chaud et remuez rapidement avec une spatule en bois pour éviter que les flocons d'avoine ne s'agglutinent au fond de la casserole.
|
| 326 |
+
4. Laissez mijoter sur feu doux pendant environ 5 minutes tout en continuant de remuer régulièrement.
|
| 327 |
+
5. Transférez ensuite le porridge dans un bol et servez immédiatement. Vous pouvez ajouter quelques fruits rouges frais pour décorer et apporter encore plus de gourmandise à votre petit déjeuner ! Bon appétit !
|
| 328 |
+
```
|
| 329 |
+
|
| 330 |
+
#### Hindi
|
| 331 |
+
```
|
| 332 |
+
<human>: दिल्ली से जयपुर कैसे जाये? बस से या कार से? दोनो के बारे मे विस्तार मे बताये.
|
| 333 |
+
<bot>: दिल्ली से जयपुर जाने का सबसे आसान और तेज़ तरीका है कि आप अपनी खुद की कार लेकर जाएं, क्योंकि यह आपको अपने समय पर निर्भर रहने की स्वतंत्रता देता है। हालांकि, यदि आपके पास अपनी कार नहीं है, तो आप दिल्ली से जयपुर तक एक निजी वाहन किराए पर ले सकते हैं। यदि आप सार्वजनिक परिवहन (बस) का उपयोग करना चाहते हैं, तो दिल्ली में कई सरकारी और निजी बस सेवाएं उपलब्ध हैं जो जयपुर को जोड़ती हैं। दिल्ली से जयपुर जाने वाली कुछ प्रमुख बसें हैं-
|
| 334 |
+
1. राजस्थान रोडवेज: ये राज्य सरकार द्वारा संचालित बसें हैं जो दिल्ली से जयपुर के बीच चलती हैं। ये नियमित रूप से चलती हैं और इनके लिए अग्रिम आरक्षण आवश्यक नहीं है।
|
| 335 |
+
2. ALSA: यह एक निजी कंपनी है जो दिल्ली से जयपुर के बीच नियमित रूप से चलती है। इसके टिकटों की बुकिंग ऑनलाइन और ऑफलाइन दोनों माध्यमों से की जा सकती है।
|
| 336 |
+
3. RedBus: यह एक ऑनलाइन बस बुकिंग वेबसाइट है जहां से आप दिल्ली से जयपुर के बीच चलनेवाली विभिन्न कंपनियों की बसों में सीट आरक्षित करा सकते हैं।
|
| 337 |
+
4. Ola and Uber: इन ऐप्स का उपयोग करके आप द���ल्ली से जयपुर तक कैब राइड बुक कर सकते हैं, लेकिन ध्यान रखें कि यह विकल्प सस्ता नहीं होगा। दिल्ली से जयपुर जाने का सबसे अच्छा विकल्प आपकी व्यक्तिगत आवश्यकताओं, बजट, और उपलब्धता पर निर्भर करता है। यदि आप अपनी खुद की कार रखते हैं, तो इसका लाभ उठाएं अन्यथा किसी भी सार्वजनिक परिवहन साधन का चयन करें जो आपकी आवश्यकताओं को पूरा करे।
|
| 338 |
+
```
|
| 339 |
+
|
| 340 |
+
#### Spanish
|
| 341 |
+
|
| 342 |
+
```
|
| 343 |
+
<human>: ¿Me puedes dar algunas recomendaciones de juegos? Estoy buscando juegos de rol con un sistema de magia único, un sistema de construcción único y una buena historia. Por favor limítate a juegos que se puedan jugar en PC directamente, o por emulación.
|
| 344 |
+
<bot>: Claro! Aquí te dejo tres recomendaciones:
|
| 345 |
+
1. The Outer Worlds: es un juego de rol de ciencia ficción con un sistema de magia basado en la tecnología alienígena que permite al jugador personalizar sus habilidades para adaptarse a su estilo de juego.
|
| 346 |
+
2. Divinity: Original Sin 2 - Definitive Edition: este juego combina elementos de RPG con estrategia en tiempo real (RTS), lo que le da al jugador un control total sobre cómo construir y mejorar su personaje y el mundo que lo rodea.
|
| 347 |
+
3. Pillars of Eternity: esta aventura de fantasía oscura tiene lugar en un vasto mundo abierto donde los jugadores pueden elegir entre diferentes clases y desarrollar relaciones con otros personajes del juego. La mecánica de combate está basada en turnos y utiliza hechizos y armas cuerpo a cuerpo.
|
| 348 |
+
Estos son solo algunos ejemplos de juegos que podrían interesarte según tus criterios. Hay muchos más títulos disponibles en el mercado que podrían ajustarse a tu gusto, así que no dudes en explorar otras opciones si estos juegos no cumplen con tus expectativas.
|
| 349 |
+
```
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
</details>
|
| 353 |
+
|
| 354 |
+
---
|
| 355 |
+
|
| 356 |
+
## Evaluation Graphs
|
| 357 |
+
|
| 358 |
+
<details>
|
| 359 |
+
<summary>Click to expand</summary>
|
| 360 |
+
|
| 361 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
<figure style=""text-align:center;"">
|
| 365 |
+
<figcaption><b>BLOOMChat vs Baselines Model in Human Preference Rankings</b></figcaption>
|
| 366 |
+
</figure>
|
| 367 |
+
<br>
|
| 368 |
+
|
| 369 |
+

|
| 370 |
+
<figure style=""text-align:center;"">
|
| 371 |
+
<figcaption><b>BLOOMChat vs GPT-4 in Human Preference Ranking</b></figcaption>
|
| 372 |
+
</figure>
|
| 373 |
+
<br>
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
<figure style=""text-align:center;"">
|
| 377 |
+
<figcaption><b>BLOOMChat surpasses other Bloom variants and state-of-the-art open-source chat models in translation tasks [NOTE: Evaluation of the BLOOM and BLOOMZ in WMT18 en->zh zh->en used (human, bot) ChatML tags due to an unintentional configuration. Results might be suboptimal.]</b></figcaption>
|
| 378 |
+
</figure>
|
| 379 |
+
<br>
|
| 380 |
+
|
| 381 |
+
</details>
|
| 382 |
+
|
| 383 |
+
---
|
| 384 |
+
|
| 385 |
+
## Training Details
|
| 386 |
+
|
| 387 |
+
<details>
|
| 388 |
+
<summary>Click to expand</summary>
|
| 389 |
+
|
| 390 |
+
### Training Data
|
| 391 |
+
|
| 392 |
+
<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 393 |
+
|
| 394 |
+
- [OIG dataset from OpenChatKit](https://huggingface.co/datasets/laion/OIG)
|
| 395 |
+
- [Dolly 2.0](https://huggingface.co/datasets/databricks/databricks-dolly-15k)
|
| 396 |
+
- [Oasst1](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
| 397 |
+
|
| 398 |
+
### Training Procedure
|
| 399 |
+
|
| 400 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 401 |
+
|
| 402 |
+
We trained BLOOMChat with [SambaNova DataScale systems](https://sambanova.ai/products/datascale/) with SambaNova's in-house Reconfigurable Dataflow Unit (RDU). We started from [BLOOM (176B)](https://huggingface.co/bigscience/bloom), an open-source multilingual LLM pretrained by the [BigScience group](https://huggingface.co/bigscience). We instruction-tune BLOOM (176B) on OpenChatKit with each data source subsampled to 100k for one epoch, followed by three epochs over the combined OpenChatKit and Dolly 2.0.
|
| 403 |
+
All of the code used to prepare the datasets and the scripts to run training and inference are open-sourced and freely available at [sambanova/bloomchat](https://github.com/sambanova/bloomchat/tree/main)
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
### Prompting Style Used For Training
|
| 407 |
+
```
|
| 408 |
+
<human>: {input1 that the user wants from the bot}
|
| 409 |
+
<bot>: {response1}</s>
|
| 410 |
+
<human>: {input2 that the user wants from the bot}
|
| 411 |
+
<bot>: {response2}</s>
|
| 412 |
+
```
|
| 413 |
+
|
| 414 |
+
### Hyperparameters
|
| 415 |
+
|
| 416 |
+
**Instruction-tuned Training on OIG**
|
| 417 |
+
|
| 418 |
+
- Hardware: SambaNova Reconfigurable Dataflow Unit (RDU)
|
| 419 |
+
- Optimizer: AdamW
|
| 420 |
+
- Grad accumulation: 1
|
| 421 |
+
- Epochs: 1
|
| 422 |
+
- Global Batch size: 128
|
| 423 |
+
- Batch tokens: 128 * 2048 = 262,144 tokens
|
| 424 |
+
- Learning Rate: 1e-5
|
| 425 |
+
- Learning Rate Scheduler: Cosine Schedule with Warmup
|
| 426 |
+
- Warmup Steps: 0
|
| 427 |
+
- End Learning Ratio: 0.1
|
| 428 |
+
- Weight decay: 0.1
|
| 429 |
+
|
| 430 |
+
**Instruction-tuned Training on Dolly 2.0 and Oasst1**
|
| 431 |
+
|
| 432 |
+
- Hardware: SambaNova Reconfigurable Dataflow Unit (RDU)
|
| 433 |
+
- Optimizer: AdamW
|
| 434 |
+
- Grad accumulation: 1
|
| 435 |
+
- Epochs: 3
|
| 436 |
+
- Global Batch size: 128
|
| 437 |
+
- Batch tokens: 128 * 2048 = 262,144 tokens
|
| 438 |
+
- Learning Rate: 1e-5
|
| 439 |
+
- Learning Rate Scheduler: Cosine Schedule with Warmup
|
| 440 |
+
- Warmup Steps: 0
|
| 441 |
+
- End Learning Ratio: 0.1
|
| 442 |
+
- Weight decay: 0.1
|
| 443 |
+
|
| 444 |
+
</details>
|
| 445 |
+
|
| 446 |
+
---
|
| 447 |
+
|
| 448 |
+
## Bias, Risks, and Limitations
|
| 449 |
+
|
| 450 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 451 |
+
|
| 452 |
+
Like all LLMs, BLOOMChat has certain limitations:
|
| 453 |
+
- Hallucination: BLOOMChat may sometimes generate responses that contain plausible-sounding but factually incorrect or irrelevant information.
|
| 454 |
+
- Code Switching: The model might unintentionally switch between languages or dialects within a single response, affecting the coherence and understandability of the output.
|
| 455 |
+
- Repetition: BLOOMChat may produce repetitive phrases or sentences, leading to less engaging and informative responses.
|
| 456 |
+
- Coding and Math: The model's performance in generating accurate code or solving complex mathematical problems may be limited.
|
| 457 |
+
- Toxicity: BLOOMChat may inadvertently generate responses containing inappropriate or harmful content.
|
| 458 |
+
|
| 459 |
+
## Acknowledgment

We would like to extend our gratitude to [Together](https://www.together.xyz/) for their insightful technical discussions on overall project planning, data processing, model training, human evaluation experiment design, and open-source endeavors, as well as for their contributions to the data processing code for OpenChatKit, OASST1, and Dolly 2.0.

We are grateful to the various researchers and open-source projects that have contributed to the development of BLOOMChat. We thank [BigScience](https://bigscience.huggingface.co/) for providing the [BLOOM](https://huggingface.co/bigscience/bloom) model, which served as the base for our instruction tuning. We also thank [LAION](https://laion.ai/) for their [OIG dataset](https://huggingface.co/datasets/laion/OIG) and the OpenAssistant Conversations Dataset ([OASST1](https://huggingface.co/datasets/OpenAssistant/oasst1)), and [Databricks](https://www.databricks.com/) for providing [Dolly 2.0](https://huggingface.co/datasets/databricks/databricks-dolly-15k); these are the datasets we instruction-tuned on.

We appreciate [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness) and [BigScience](https://bigscience.huggingface.co/) for their essential benchmarking contributions, which were very helpful in evaluating BLOOMChat's performance. We also drew inspiration from the wave of recent open-source chat models, including [OpenAssistant-30B](https://huggingface.co/OpenAssistant/oasst-sft-7-llama-30b-xor), [LLaMA-Adapter-V2-65B](https://github.com/ZrrSkywalker/LLaMA-Adapter/tree/main/llama_adapter_v2_chat65b), [Vicuna-13b](https://huggingface.co/lmsys/vicuna-13b-delta-v0), [Koala-13b](https://huggingface.co/TheBloke/koala-13B-HF), [OASST-Pythia-12b](https://huggingface.co/OpenAssistant/oasst-sft-1-pythia-12b), [Alpaca-13b](https://huggingface.co/anon8231489123/gpt4-x-alpaca-13b-native-4bit-128g), [ChatGLM-6b](https://github.com/THUDM/ChatGLM-6B), [FastChat-T5-3b](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0), [Dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b), [LLaMA-13b](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/), [StableLM-Tuned-Alpha-7b](https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b), [RedPajama-INCITE-Chat-7B-v0.1](https://huggingface.co/togethercomputer/RedPajama-INCITE-Chat-7B-v0.1), [RedPajama-INCITE-Chat-3B-v1](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1), [MPT-7B-Chat](https://huggingface.co/mosaicml/mpt-7b-chat), and others. We look forward to witnessing the continued growth and success of open-source chat-based models.

We highly appreciate the hard work and dedication of these researchers and organizations towards the advancement of the open-source community. Their contributions were invaluable in the development of BLOOMChat, and we hope that our model can contribute to further advancements in the field.

## Cite BLOOMChat
```
@software{bloomchat,
  title = {{BLOOMChat: a New Open Multilingual Chat LLM}},
  author = {SambaNova Systems, Together Computer},
  url = {https://huggingface.co/sambanovasystems/BLOOMChat-176B-v1},
  month = {5},
  year = {2023},
  version = {1.0},
}
```","{""id"": ""sambanovasystems/BLOOMChat-176B-v1"", ""author"": ""sambanovasystems"", ""sha"": ""cdae1b2e0af1778258306106522a3dda6abb0276"", ""last_modified"": ""2023-05-19 20:34:37+00:00"", ""created_at"": ""2023-05-10 21:17:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 308, ""downloads_all_time"": null, ""likes"": 364, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""bloom"", ""text-generation"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""BloomForCausalLM""], ""model_type"": ""bloom"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Human_evaluation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Human_evaluation_gpt4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/Multilingual_capabilities_comparison.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-01-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-02-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-03-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-04-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-05-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-06-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-07-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-08-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-09-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-10-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-11-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-12-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-13-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-14-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-15-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-16-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-17-of-20.bin', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-18-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-19-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-20-of-20.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""sddwt/guanaco""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-19 20:34:37+00:00"", ""cardData"": ""license: other\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""645c09f30120c98d16abc032"", ""modelId"": ""sambanovasystems/BLOOMChat-176B-v1"", ""usedStorage"": 719390423273}",0,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=sambanovasystems/BLOOMChat-176B-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsambanovasystems%2FBLOOMChat-176B-v1%5D(%2Fsambanovasystems%2FBLOOMChat-176B-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sambanovasystems/BLOOMChat, sddwt/guanaco",3
|
Baichuan-7B_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,230 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
baichuan-inc/Baichuan-7B,"---
|
| 3 |
+
language:
|
| 4 |
+
- zh
|
| 5 |
+
- en
|
| 6 |
+
pipeline_tag: text-generation
|
| 7 |
+
inference: false
|
| 8 |
+
---
|
| 9 |
+
# Baichuan-7B
|
| 10 |
+
|
| 11 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 12 |
+
|
| 13 |
+
Baichuan-7B是由百川智能开发的一个开源的大规模预训练模型。基于Transformer结构,在大约1.2万亿tokens上训练的70亿参数模型,支持中英双语,上下文窗口长度为4096。在标准的中文和英文权威benchmark(C-EVAL/MMLU)上均取得同尺寸最好的效果。
|
| 14 |
+
|
| 15 |
+
如果希望使用Baichuan-7B(如进行推理、Finetune等),我们推荐使用配套代码库[Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B)。
|
| 16 |
+
|
| 17 |
+
Baichuan-7B is an open-source large-scale pre-trained model developed by Baichuan Intelligent Technology. Based on the Transformer architecture, it is a model with 7 billion parameters trained on approximately 1.2 trillion tokens. It supports both Chinese and English, with a context window length of 4096. It achieves the best performance of its size on standard Chinese and English authoritative benchmarks (C-EVAL/MMLU).
|
| 18 |
+
|
| 19 |
+
If you wish to use Baichuan-7B (for inference, finetuning, etc.), we recommend using the accompanying code library [Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B).
|
| 20 |
+
|
| 21 |
+
## Why use Baichuan-7B
|
| 22 |
+
|
| 23 |
+
- 在同尺寸模型中Baichuan-7B达到了目前SOTA的水平,参考下面MMLU指标
|
| 24 |
+
- Baichuan-7B使用自有的中英文双语语料进行训练,在中文上进行优化,在C-Eval达到SOTA水平
|
| 25 |
+
- 不同于LLaMA完全禁止商业使用,Baichuan-7B使用更宽松的开源协议,允许用于商业目的
|
| 26 |
+
|
| 27 |
+
- Among models of the same size, Baichuan-7B has achieved the current state-of-the-art (SOTA) level, as evidenced by the following MMLU metrics.
|
| 28 |
+
- Baichuan-7B is trained on proprietary bilingual Chinese-English corpora, optimized for Chinese, and achieves SOTA performance on C-Eval.
|
| 29 |
+
- Unlike LLaMA, which completely prohibits commercial use, Baichuan-7B employs a more lenient open-source license, allowing for commercial purposes.
|
| 30 |
+
|
| 31 |
+
## How to Get Started with the Model
|
| 32 |
+
|
| 33 |
+
如下是一个使用Baichuan-7B进行1-shot推理的任务,根据作品给出作者名,正确输出为""夜雨寄北->李商隐""
|
| 34 |
+
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""baichuan-inc/Baichuan-7B"", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(""baichuan-inc/Baichuan-7B"", device_map=""auto"", trust_remote_code=True)
inputs = tokenizer('登鹳雀楼->王之涣\n夜雨寄北->', return_tensors='pt')
inputs = inputs.to('cuda:0')
pred = model.generate(**inputs, max_new_tokens=64, repetition_penalty=1.1)
print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```
|
| 44 |
+
|
| 45 |
+
The following is a task of performing 1-shot inference using Baichuan-7B, where the author's name is given based on the work, with the correct output being ""One Hundred Years of Solitude->Gabriel Garcia Marquez""
|
| 46 |
+
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(""baichuan-inc/Baichuan-7B"", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(""baichuan-inc/Baichuan-7B"", device_map=""auto"", trust_remote_code=True)
inputs = tokenizer('Hamlet->Shakespeare\nOne Hundred Years of Solitude->', return_tensors='pt')
inputs = inputs.to('cuda:0')
pred = model.generate(**inputs, max_new_tokens=64, repetition_penalty=1.1)
print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```
|
| 56 |
+
|
| 57 |
+
## Model Details
|
| 58 |
+
|
| 59 |
+
### Model Description
|
| 60 |
+
|
| 61 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 62 |
+
|
| 63 |
+
- **Developed by:** 百川智能(Baichuan Intelligent Technology)
|
| 64 |
+
- **Email**: opensource@baichuan-inc.com
|
| 65 |
+
- **Language(s) (NLP):** Chinese/English
|
| 66 |
+
- **License:** [Baichuan-7B License](https://huggingface.co/baichuan-inc/Baichuan-7B/blob/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)
|
| 67 |
+
|
| 68 |
+
### Model Sources
|
| 69 |
+
|
| 70 |
+
<!-- Provide the basic links for the model. -->
|
| 71 |
+
|
| 72 |
+
整体模型基于标准的Transformer结构,我们采用了和LLaMA一样的模型设计
|
| 73 |
+
- **Position Embedding**:采用rotary-embedding,是现阶段被大多数模型采用的位置编码方案,具有很好的外推性。
|
| 74 |
+
- **Feedforward Layer**:采用SwiGLU,Feedforward变化为(8/3)倍的隐含层大小,即11008。
|
| 75 |
+
- **Layer Normalization**: 基于[RMSNorm](https://arxiv.org/abs/1910.07467)的Pre-Normalization。
|
| 76 |
+
|
| 77 |
+
具体参数和见下表
|
| 78 |
+
| Hyperparameter | Value |
|
| 79 |
+
|----------------|-------|
|
| 80 |
+
|n_parameters | 7000559616 |
|
| 81 |
+
|n_layers | 32 |
|
| 82 |
+
| n_heads | 32 |
|
| 83 |
+
| d_model | 4096 |
|
| 84 |
+
| vocab size | 64000 |
|
| 85 |
+
| sequence length | 4096 |
|
| 86 |
+
|
| 87 |
+
The overall model is based on the standard Transformer structure, and we have adopted the same model design as LLaMA:

- Position Embedding: We use rotary embedding, the position encoding scheme adopted by most current models, which has excellent extrapolation capabilities.
- Feedforward Layer: We use SwiGLU; the feedforward size becomes (8/3) times the hidden size, i.e. 11008.
- Layer Normalization: Pre-Normalization based on [RMSNorm](https://arxiv.org/abs/1910.07467).

The specific parameters are as follows:

| Hyperparameter  | Value      |
|-----------------|------------|
| n_parameters    | 7000559616 |
| n_layers        | 32         |
| n_heads         | 32         |
| d_model         | 4096       |
| vocab size      | 64000      |
| sequence length | 4096       |
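
As a small illustrative aside (not from the card), the 11008 feedforward width can be recovered from d_model = 4096: 4096 * 8/3 is roughly 10922, rounded up to a hardware-friendly multiple. The rounding multiple of 128 below is an assumption; the card only states the final value.

```python
# Illustrative check of the SwiGLU feedforward width (assumption: round up to a multiple of 128,
# the convention used by LLaMA-style models; the card only states the final value 11008).
def swiglu_ffn_dim(d_model: int, multiple_of: int = 128) -> int:
    raw = int(d_model * 8 / 3)                                     # 4096 * 8 / 3 -> 10922
    return multiple_of * ((raw + multiple_of - 1) // multiple_of)  # round up -> 11008

print(swiglu_ffn_dim(4096))  # 11008
```
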
## Uses
|
| 104 |
+
|
| 105 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 106 |
+
|
| 107 |
+
### Downstream Use
|
| 108 |
+
|
| 109 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 110 |
+
我们同时开源出了和本模型配套的训练代码,允许进行高效的Finetune用于下游任务,具体参见[Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B)。
|
| 111 |
+
|
| 112 |
+
We have also open-sourced the training code that accompanies this model, allowing for efficient finetuning for downstream tasks. For more details, please refer to [Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B).
|
| 113 |
+
|
| 114 |
+
### Out-of-Scope Use
|
| 115 |
+
|
| 116 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 117 |
+
在没有充分评估风险和采取缓解措施的情况下投入生产使用;任何可能被视为不负责任或有害的使用案例。
|
| 118 |
+
|
| 119 |
+
Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful.
|
| 120 |
+
|
| 121 |
+
## Bias, Risks, and Limitations
|
| 122 |
+
|
| 123 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 124 |
+
|
| 125 |
+
Baichuan-7B可能会产生事实上不正确的输出,不应依赖它产生事实上准确的信息。Baichuan-7B是在各种公共数据集上进行训练的。尽管我们已经做出了巨大的努力来清洗预训练数据,但这个模型可能会生成淫秽、偏见或其他冒犯性的输出。
|
| 126 |
+
|
| 127 |
+
Baichuan-7B can produce factually incorrect output, and should not be relied on to produce factually accurate information. Baichuan-7B was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs.
|
| 128 |
+
|
| 129 |
+
## Training Details
|
| 130 |
+
|
| 131 |
+
训练具体设置参见[Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B)。
|
| 132 |
+
|
| 133 |
+
For specific training settings, please refer to [Baichuan-7B](https://github.com/baichuan-inc/Baichuan-7B).
|
| 134 |
+
|
| 135 |
+
## Evaluation
|
| 136 |
+
|
| 137 |
+
### 中文评测
|
| 138 |
+
#### C-Eval
|
| 139 |
+
[CEval数据集](https://cevalbenchmark.com/index.html)是一个全面的中文基础模型评测数据集,涵盖了52个学科和四个难度的级别。我们使用该数据集的dev集作为few-shot的来源,在test集上进行了5-shot测试。
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
| Model 5-shot | Average | Avg(Hard) | STEM | Social Sciences | Humanities | Others |
|
| 143 |
+
|-----------------------------|---------|-----------|------|-----------------|------------|--------|
|
| 144 |
+
| GPT-4 | 68.7 | 54.9 | 67.1 | 77.6 | 64.5 | 67.8 |
|
| 145 |
+
| ChatGPT | 54.4 | 41.4 | 52.9 | 61.8 | 50.9 | 53.6 |
|
| 146 |
+
| Claude-v1.3 | 54.2 | 39.0 | 51.9 | 61.7 | 52.1 | 53.7 |
|
| 147 |
+
| Claude-instant-v1.0 | 45.9 | 35.5 | 43.1 | 53.8 | 44.2 | 45.4 |
|
| 148 |
+
| moss-moon-003-base (16B) | 27.4 | 24.5 | 27.0 | 29.1 | 27.2 | 26.9 |
|
| 149 |
+
| Ziya-LLaMA-13B-pretrain | 30.2 | 22.7 | 27.7 | 34.4 | 32.0 | 28.9 |
|
| 150 |
+
| LLaMA-7B-hf | 27.1 | 25.9 | 27.1 | 26.8 | 27.9 | 26.3 |
|
| 151 |
+
| ChatGLM-6B | 34.5 | 23.1 | 30.4 | 39.6 | 37.4 | 34.5 |
|
| 152 |
+
| Falcon-7B | 25.8 | 24.3 | 25.8 | 26.0 | 25.8 | 25.6 |
|
| 153 |
+
| Open-LLaMA-v2-pretrain (7B) | 24.0 | 22.5 | 23.1 | 25.3 | 25.2 | 23.2 |
|
| 154 |
+
| TigerBot-7B-base | 25.7 | 27.0 | 27.3 | 24.7 | 23.4 | 26.1 |
|
| 155 |
+
| Aquila-7B<sup>*</sup> | 25.5 | 25.2 | 25.6 | 24.6 | 25.2 | 26.6 |
|
| 156 |
+
| BLOOM-7B | 22.8 | 20.2 | 21.8 | 23.3 | 23.9 | 23.3 |
|
| 157 |
+
| BLOOMZ-7B | 35.7 | 25.8 | 31.3 | 43.5 | 36.6 | 35.6 |
|
| 158 |
+
| **Baichuan-7B** | 42.8 | 31.5 | 38.2 | 52.0 | 46.2 | 39.3 |
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
#### Gaokao
|
| 162 |
+
[Gaokao](https://github.com/ExpressAI/AI-Gaokao) 是一个以中国高考题作为评测大语言模型能力的数据集,用以评估模型的语言能力和逻辑推理能力。
|
| 163 |
+
我们只保留了其中的单项选择题,并对所有模型进行统一5-shot测试。
|
| 164 |
+
|
| 165 |
+
以下是测试的结果。
|
| 166 |
+
|
| 167 |
+
| Model | Average |
|
| 168 |
+
|-------------------------|-----------------|
|
| 169 |
+
| Open-LLaMA-v2-pretrain | 21.41 |
|
| 170 |
+
| Ziya-LLaMA-13B-pretrain | 23.17 |
|
| 171 |
+
| Falcon-7B | 23.98 |
|
| 172 |
+
| TigerBot-7B-base | 25.94 |
|
| 173 |
+
| LLaMA-7B | 27.81 |
|
| 174 |
+
| ChatGLM-6B | 21.41 |
|
| 175 |
+
| BLOOM-7B | 26.96 |
|
| 176 |
+
| BLOOMZ-7B | 28.72 |
|
| 177 |
+
| Aquila-7B<sup>*</sup> | 24.39 |
|
| 178 |
+
| **Baichuan-7B** | **36.24** |
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
#### AGIEval
|
| 182 |
+
[AGIEval](https://github.com/microsoft/AGIEval) 旨在评估模型的认知和解决问题相关的任务中的一般能力。
|
| 183 |
+
我们只保留了其中的四选一单项选择题,随机划分后对所有模型进行了统一5-shot测试。
|
| 184 |
+
|
| 185 |
+
| Model | Average |
|
| 186 |
+
|-------------------------|-----------------|
|
| 187 |
+
| Open-LLaMA-v2-pretrain | 23.49 |
|
| 188 |
+
| Ziya-LLaMA-13B-pretrain | 27.64 |
|
| 189 |
+
| Falcon-7B | 27.18 |
|
| 190 |
+
| TigerBot-7B-base | 25.19 |
|
| 191 |
+
| LLaMA-7B | 28.17 |
|
| 192 |
+
| ChatGLM-6B | 23.49 |
|
| 193 |
+
| BLOOM-7B | 26.55 |
|
| 194 |
+
| BLOOMZ-7B | 30.27 |
|
| 195 |
+
| Aquila-7B<sup>*</sup> | 25.58 |
|
| 196 |
+
| **Baichuan-7B** | **34.44** |
|
| 197 |
+
|
| 198 |
+
<sup>*</sup>其中Aquila模型来源于[智源官方网站](https://model.baai.ac.cn/model-detail/100098),仅做参考
|
| 199 |
+
|
| 200 |
+
### English Leaderboard
|
| 201 |
+
In addition to Chinese, we also tested the model's performance in English.
|
| 202 |
+
|
| 203 |
+
#### MMLU
|
| 204 |
+
|
| 205 |
+
[MMLU](https://arxiv.org/abs/2009.03300) is an English evaluation dataset that includes 57 multiple-choice tasks, covering elementary mathematics, American history, computer science, law, etc. The difficulty ranges from high school level to expert level, making it a mainstream LLM evaluation dataset.
|
| 206 |
+
|
| 207 |
+
We adopted the [open-source]((https://github.com/hendrycks/test)) evaluation scheme, and the final 5-shot results are as follows:
|
| 208 |
+
|
| 209 |
+
| Model | Humanities | Social Sciences | STEM | Other | Average |
|
| 210 |
+
|----------------------------------------|-----------:|:---------------:|:----:|:-----:|:-------:|
|
| 211 |
+
| LLaMA-7B<sup>2</sup> | 34.0 | 38.3 | 30.5 | 38.1 | 35.1 |
|
| 212 |
+
| Falcon-7B<sup>1</sup> | - | - | - | - | 35.0 |
|
| 213 |
+
| mpt-7B<sup>1</sup> | - | - | - | - | 35.6 |
|
| 214 |
+
| ChatGLM-6B<sup>0</sup> | 35.4 | 41.0 | 31.3 | 40.5 | 36.9 |
|
| 215 |
+
| BLOOM 7B<sup>0</sup> | 25.0 | 24.4 | 26.5 | 26.4 | 25.5 |
|
| 216 |
+
| BLOOMZ 7B<sup>0</sup> | 31.3 | 42.1 | 34.4 | 39.0 | 36.1 |
|
| 217 |
+
| moss-moon-003-base (16B)<sup>0</sup> | 24.2 | 22.8 | 22.4 | 24.4 | 23.6 |
|
| 218 |
+
| moss-moon-003-sft (16B)<sup>0</sup> | 30.5 | 33.8 | 29.3 | 34.4 | 31.9 |
|
| 219 |
+
| **Baichuan-7B<sup>0</sup>** | 38.4 | 48.9 | 35.6 | 48.1 | 42.3 |
|
| 220 |
+
|
| 221 |
+
The superscript in the Model column indicates the source of the results.
|
| 222 |
+
```
|
| 223 |
+
0:reimplemented
|
| 224 |
+
1:https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
|
| 225 |
+
2:https://paperswithcode.com/sota/multi-task-language-understanding-on-mmlu
|
| 226 |
+
```
|
| 227 |
+
|
| 228 |
+
## Our Group
|
| 229 |
+

|
| 230 |
+
","{""id"": ""baichuan-inc/Baichuan-7B"", ""author"": ""baichuan-inc"", ""sha"": ""5d86e56a58fe4a5b3292cd9bb7468afef6f93eab"", ""last_modified"": ""2024-01-09 07:45:22+00:00"", ""created_at"": ""2023-06-13 07:47:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 14983, ""downloads_all_time"": null, ""likes"": 838, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""baichuan"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:1910.07467"", ""arxiv:2009.03300"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\npipeline_tag: text-generation\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""BaiChuanForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_baichuan.BaiChuanConfig"", ""AutoModelForCausalLM"": ""modeling_baichuan.BaiChuanForCausalLM""}, ""model_type"": ""baichuan"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaiChuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='License Agreement for Baichuan-7B Model.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='baichuan-7B \u6a21\u578b\u8bb8\u53ef\u534f\u8bae.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='handler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", 
""LinkSoul/LLaSM"", ""gsaivinay/open_llm_leaderboard"", ""LinkSoul/Chinese-LLaVa"", ""GTBench/GTBench"", ""ysharma/baichuan-7B"", ""Vikhrmodels/small-shlepa-lb"", ""Justinrune/LLaMA-Factory"", ""yhavinga/dutch-tokenizer-arena"", ""kenken999/fastapi_django_main_live"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""officialhimanshu595/llama-factory"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""li-qing/FIRE"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""tianleliphoebe/visual-arena"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""Ashmal/MobiLlama"", ""xiaoyiAILab/testbaichuan"", ""xiaoyiAILab/testbaichuan1"", ""gaogao131/chatbaichuan7B"", ""alexshengzhili/calahealthgpt"", ""XuBailing/CongMa2"", ""smothiki/open_llm_leaderboard"", ""blackwingedkite/gutalk"", ""pngwn/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard_two"", ""blackwingedkite/alpaca2_clas"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""Bofeee5675/FIRE"", ""evelyn-lo/evelyn"", ""yuantao-infini-ai/demo_test"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""zxsipola123456/article"", ""zjasper666/bf16_vs_fp8"", ""martinakaduc/melt"", ""mjalg/IFEvalTR"", ""cloneQ/internLMRAG"", ""hujin0929/LlamaIndex_RAG"", ""flyfive0315/internLlamaIndex"", ""sunxiaokang/llamaindex_RAG_web"", ""kai119/llama"", ""qxy826982153/LlamaIndexRAG"", ""msun415/Llamole""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-09 07:45:22+00:00"", ""cardData"": ""language:\n- zh\n- en\npipeline_tag: text-generation\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaiChuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64881f0445a9218318de4a23"", ""modelId"": ""baichuan-inc/Baichuan-7B"", ""usedStorage"": 28005568432}",0,,0,"https://huggingface.co/jeeejeee/baichuan7b-zero-init, https://huggingface.co/Sylvia2025/baichuan-7B-alpaca-gpt4-zh, https://huggingface.co/jeeejeee/baichuan-7b-lora-zero-regex",3,https://huggingface.co/PrunaAI/baichuan-inc-Baichuan-7B-GGUF-smashed,1,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, Justinrune/LLaMA-Factory, LinkSoul/Chinese-LLaVa, LinkSoul/LLaSM, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=baichuan-inc/Baichuan-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbaichuan-inc%2FBaichuan-7B%5D(%2Fbaichuan-inc%2FBaichuan-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, yhavinga/dutch-tokenizer-arena",13
|
BioMistral-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
CogView4-6B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,330 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
THUDM/CogView4-6B,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
language:
|
| 5 |
+
- zh
|
| 6 |
+
- en
|
| 7 |
+
base_model:
|
| 8 |
+
- THUDM/glm-4-9b
|
| 9 |
+
pipeline_tag: text-to-image
|
| 10 |
+
library_name: diffusers
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# CogView4-6B
|
| 14 |
+
|
| 15 |
+
<p style=""text-align: center;"">
|
| 16 |
+
<div align=""center"">
|
| 17 |
+
<img src=https://github.com/THUDM/CogView4/raw/main/resources/logo.svg width=""50%""/>
|
| 18 |
+
</div>
|
| 19 |
+
<p align=""center"">
|
| 20 |
+
<a href=""https://huggingface.co/spaces/THUDM-HF-SPACE/CogView4"">🤗 Space | </a>
|
| 21 |
+
<a href=""https://github.com/THUDM/CogView4"">🌐 Github </a> |
|
| 22 |
+
<a href=""https://arxiv.org/pdf/2403.05121"">📜 CogView3 Paper </a>
|
| 23 |
+
</p>
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
## Inference Requirements and Model Introduction

+ Resolution: Width and height must be between `512px` and `2048px`, divisible by `32`, and the total number of pixels must not exceed `2^21` px.
+ Precision: BF16 / FP32 (FP16 is not supported, as it causes overflow that results in completely black images)

Using `BF16` precision with `batchsize=4` for testing, the memory usage is shown in the table below:

| Resolution  | enable_model_cpu_offload OFF | enable_model_cpu_offload ON | enable_model_cpu_offload ON </br> Text Encoder 4bit |
|-------------|------------------------------|-----------------------------|-----------------------------------------------------|
| 512 * 512   | 33GB                         | 20GB                        | 13G                                                 |
| 1280 * 720  | 35GB                         | 20GB                        | 13G                                                 |
| 1024 * 1024 | 35GB                         | 20GB                        | 13G                                                 |
| 1920 * 1280 | 39GB                         | 20GB                        | 14G                                                 |

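As an illustration only (not part of the original card), here is a small helper that checks a requested resolution against the constraints above before calling the pipeline:

```python
# Illustrative helper reflecting the stated constraints: 512-2048 px per side,
# divisible by 32, and width * height <= 2**21 total pixels.
def check_cogview4_resolution(width: int, height: int) -> None:
    for name, value in (('width', width), ('height', height)):
        if not 512 <= value <= 2048:
            raise ValueError(f'{name} must be between 512 and 2048 px, got {value}')
        if value % 32 != 0:
            raise ValueError(f'{name} must be divisible by 32, got {value}')
    if width * height > 2 ** 21:
        raise ValueError(f'width * height must not exceed 2**21 pixels, got {width * height}')

check_cogview4_resolution(1024, 1024)    # passes
# check_cogview4_resolution(2048, 2048)  # would raise: 4,194,304 px > 2**21
```
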
## Quick Start

First, ensure you install the `diffusers` library from source.

```shell
pip install git+https://github.com/huggingface/diffusers.git
cd diffusers
pip install -e .
```

Then, run the following code:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained(""THUDM/CogView4-6B"", torch_dtype=torch.bfloat16)

# Enable these to reduce GPU memory usage
pipe.enable_model_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

prompt = ""A vibrant cherry red sports car sits proudly under the gleaming sun, its polished exterior smooth and flawless, casting a mirror-like reflection. The car features a low, aerodynamic body, angular headlights that gaze forward like predatory eyes, and a set of black, high-gloss racing rims that contrast starkly with the red. A subtle hint of chrome embellishes the grille and exhaust, while the tinted windows suggest a luxurious and private interior. The scene conveys a sense of speed and elegance, the car appearing as if it's about to burst into a sprint along a coastal road, with the ocean's azure waves crashing in the background.""
image = pipe(
    prompt=prompt,
    guidance_scale=3.5,
    num_images_per_prompt=1,
    num_inference_steps=50,
    width=1024,
    height=1024,
).images[0]

image.save(""cogview4.png"")
```
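
The memory table above includes an 'enable_model_cpu_offload ON + Text Encoder 4bit' column, but the card gives no code for it. The sketch below is one hedged way to approximate a 4-bit text encoder with bitsandbytes through transformers; the exact combination of `AutoModel`, `subfolder`, and `BitsAndBytesConfig` is an assumption, not something the card documents.

```python
import torch
from transformers import AutoModel, BitsAndBytesConfig
from diffusers import CogView4Pipeline

# Assumption: load the GLM text encoder from the model repo in 4-bit and pass it to the pipeline.
quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
text_encoder = AutoModel.from_pretrained(
    'THUDM/CogView4-6B', subfolder='text_encoder',
    quantization_config=quant_config, torch_dtype=torch.bfloat16,
)
pipe = CogView4Pipeline.from_pretrained(
    'THUDM/CogView4-6B', text_encoder=text_encoder, torch_dtype=torch.bfloat16
)
# The table's figures additionally enable CPU offload; whether that combines cleanly with a
# 4-bit component depends on your diffusers/bitsandbytes versions, so it is omitted here.
```
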
### Model Metrics
|
| 78 |
+
|
| 79 |
+
We've tested on multiple benchmarks and achieved the following scores:
|
| 80 |
+
|
| 81 |
+
#### DPG-Bench
|
| 82 |
+
|
| 83 |
+
| Model | Overall | Global | Entity | Attribute | Relation | Other |
|
| 84 |
+
|-----------------|-----------|-----------|-----------|-----------|-----------|-----------|
|
| 85 |
+
| SDXL | 74.65 | 83.27 | 82.43 | 80.91 | 86.76 | 80.41 |
|
| 86 |
+
| PixArt-alpha | 71.11 | 74.97 | 79.32 | 78.60 | 82.57 | 76.96 |
|
| 87 |
+
| SD3-Medium | 84.08 | 87.90 | **91.01** | 88.83 | 80.70 | 88.68 |
|
| 88 |
+
| DALL-E 3 | 83.50 | **90.97** | 89.61 | 88.39 | 90.58 | 89.83 |
|
| 89 |
+
| Flux.1-dev | 83.79 | 85.80 | 86.79 | 89.98 | 90.04 | **89.90** |
|
| 90 |
+
| Janus-Pro-7B | 84.19 | 86.90 | 88.90 | 89.40 | 89.32 | 89.48 |
|
| 91 |
+
| **CogView4-6B** | **85.13** | 83.85 | 90.35 | **91.17** | **91.14** | 87.29 |
|
| 92 |
+
|
| 93 |
+
#### GenEval
|
| 94 |
+
|
| 95 |
+
| Model | Overall | Single Obj. | Two Obj. | Counting | Colors | Position | Color attribution |
|
| 96 |
+
|-----------------|----------|-------------|----------|----------|----------|----------|-------------------|
|
| 97 |
+
| SDXL | 0.55 | 0.98 | 0.74 | 0.39 | 0.85 | 0.15 | 0.23 |
|
| 98 |
+
| PixArt-alpha | 0.48 | 0.98 | 0.50 | 0.44 | 0.80 | 0.08 | 0.07 |
|
| 99 |
+
| SD3-Medium | 0.74 | **0.99** | **0.94** | 0.72 | 0.89 | 0.33 | 0.60 |
|
| 100 |
+
| DALL-E 3 | 0.67 | 0.96 | 0.87 | 0.47 | 0.83 | 0.43 | 0.45 |
|
| 101 |
+
| Flux.1-dev | 0.66 | 0.98 | 0.79 | **0.73** | 0.77 | 0.22 | 0.45 |
|
| 102 |
+
| Janus-Pro-7B | **0.80** | **0.99** | 0.89 | 0.59 | **0.90** | **0.79** | **0.66** |
|
| 103 |
+
| **CogView4-6B** | 0.73 | **0.99** | 0.86 | 0.66 | 0.79 | 0.48 | 0.58 |
|
| 104 |
+
|
| 105 |
+
#### T2I-CompBench
|
| 106 |
+
|
| 107 |
+
| Model | Color | Shape | Texture | 2D-Spatial | 3D-Spatial | Numeracy | Non-spatial Clip | Complex 3-in-1 |
|
| 108 |
+
|-----------------|------------|------------|------------|------------|------------|------------|------------------|----------------|
|
| 109 |
+
| SDXL | 0.5879 | 0.4687 | 0.5299 | 0.2133 | 0.3566 | 0.4988 | 0.3119 | 0.3237 |
|
| 110 |
+
| PixArt-alpha | 0.6690 | 0.4927 | 0.6477 | 0.2064 | 0.3901 | 0.5058 | **0.3197** | 0.3433 |
|
| 111 |
+
| SD3-Medium | **0.8132** | 0.5885 | **0.7334** | **0.3200** | **0.4084** | 0.6174 | 0.3140 | 0.3771 |
|
| 112 |
+
| DALL-E 3 | 0.7785 | **0.6205** | 0.7036 | 0.2865 | 0.3744 | 0.5880 | 0.3003 | 0.3773 |
|
| 113 |
+
| Flux.1-dev | 0.7572 | 0.5066 | 0.6300 | 0.2700 | 0.3992 | 0.6165 | 0.3065 | 0.3628 |
|
| 114 |
+
| Janus-Pro-7B | 0.5145 | 0.3323 | 0.4069 | 0.1566 | 0.2753 | 0.4406 | 0.3137 | 0.3806 |
|
| 115 |
+
| **CogView4-6B** | 0.7786 | 0.5880 | 0.6983 | 0.3075 | 0.3708 | **0.6626** | 0.3056 | **0.3869** |
|
| 116 |
+
|
| 117 |
+
## Chinese Text Accuracy Evaluation
|
| 118 |
+
|
| 119 |
+
| Model | Precision | Recall | F1 Score | Pick@4 |
|
| 120 |
+
|-----------------|------------|------------|------------|------------|
|
| 121 |
+
| Kolors | 0.6094 | 0.1886 | 0.2880 | 0.1633 |
|
| 122 |
+
| **CogView4-6B** | **0.6969** | **0.5532** | **0.6168** | **0.3265** |
|
| 123 |
+
|
| 124 |
+
## Citation
|
| 125 |
+
|
| 126 |
+
🌟 If you find our work helpful, please consider citing our paper and leaving valuable stars
|
| 127 |
+
|
| 128 |
+
```
@article{zheng2024cogview3,
  title={Cogview3: Finer and faster text-to-image generation via relay diffusion},
  author={Zheng, Wendi and Teng, Jiayan and Yang, Zhuoyi and Wang, Weihan and Chen, Jidong and Gu, Xiaotao and Dong, Yuxiao and Ding, Ming and Tang, Jie},
  journal={arXiv preprint arXiv:2403.05121},
  year={2024}
}
```
|
| 136 |
+
|
| 137 |
+
## License
|
| 138 |
+
|
| 139 |
+
This model is released under the [Apache 2.0 License](LICENSE).
|
| 140 |
+
","{""id"": ""THUDM/CogView4-6B"", ""author"": ""THUDM"", ""sha"": ""63a52b7f6dace7033380cd6da14d0915eab3e6b5"", ""last_modified"": ""2025-03-11 08:10:58+00:00"", ""created_at"": ""2025-03-03 12:19:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 302650, ""downloads_all_time"": null, ""likes"": 215, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-image"", ""zh"", ""en"", ""arxiv:2403.05121"", ""base_model:THUDM/glm-4-9b"", ""base_model:finetune:THUDM/glm-4-9b"", ""license:apache-2.0"", ""diffusers:CogView4Pipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- THUDM/glm-4-9b\nlanguage:\n- zh\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""CogView4Pipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""THUDM-HF-SPACE/CogView4"", ""asifrana5/THUDM-CogView4-6B"", ""Mrshll2691/THUDM-CogView4-6B"", ""shubhchn/THUDM-CogView4-6B""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-11 08:10:58+00:00"", ""cardData"": ""base_model:\n- THUDM/glm-4-9b\nlanguage:\n- zh\n- 
en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""transformersInfo"": null, ""_id"": ""67c59e6ef872c9b6b6f8fc17"", ""modelId"": ""THUDM/CogView4-6B"", ""usedStorage"": 31128922533}",0,"https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0, https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0, https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas",3,,0,https://huggingface.co/p1atdev/CogView4-6B-quanto_int8,1,,0,"Mrshll2691/THUDM-CogView4-6B, THUDM-HF-SPACE/CogView4, asifrana5/THUDM-CogView4-6B, shubhchn/THUDM-CogView4-6B",4
|
| 141 |
+
finetrainers/CogView4-6B-rider-waite-tarot-v0,"---
|
| 142 |
+
base_model:
|
| 143 |
+
- THUDM/CogView4-6B
|
| 144 |
+
datasets:
|
| 145 |
+
- multimodalart/1920-raider-waite-tarot-public-domain
|
| 146 |
+
library_name: diffusers
|
| 147 |
+
license: other
|
| 148 |
+
license_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE
|
| 149 |
+
widget:
|
| 150 |
+
- text: >-
|
| 151 |
+
TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and holding a staff, \""knight of wands\""
|
| 152 |
+
output:
|
| 153 |
+
url: final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227419.png
|
| 154 |
+
- text: >-
|
| 155 |
+
TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding a trophee, \""queen of cups\""
|
| 156 |
+
output:
|
| 157 |
+
url: final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741227417.png
|
| 158 |
+
- text: >-
|
| 159 |
+
TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins to two kneeling figures, surrounded by six pentacles
|
| 160 |
+
output:
|
| 161 |
+
url: final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227455.png
|
| 162 |
+
tags:
|
| 163 |
+
- text-to-image
|
| 164 |
+
- diffusers-training
|
| 165 |
+
- diffusers
|
| 166 |
+
- template:sd-lora
|
| 167 |
+
- cogview4
|
| 168 |
+
---
|
| 169 |
+
|
| 170 |
+
<Gallery />
|
| 171 |
+
|
| 172 |
+
This is a LoRA fine-tune of the [THUDM/CogView4-6B](https://huggingface.co/THUDM/CogView4-6B) model.
|
| 173 |
+
|
| 174 |
+
Code: https://github.com/a-r-r-o-w/finetrainers
|
| 175 |
+
|
| 176 |
+
Inference code:
|
| 177 |
+
|
| 178 |
+
```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained(
    ""THUDM/CogView4-6B"", torch_dtype=torch.bfloat16
).to(""cuda"")
pipe.load_lora_weights(""finetrainers/CogView4-6B-rider-waite-tarot-v0"", adapter_name=""cogview4-lora"")
pipe.set_adapters([""cogview4-lora""], [0.9])

image = pipe(""<my-awesome-prompt>"").images[0]
image.save(""output.png"")
```
|
| 192 |
+
|
| 193 |
+
Training logs are available on WandB [here](https://wandb.ai/aryanvs/finetrainers-cogview4).
|
| 194 |
+
|
| 195 |
+
NOTE: this checkpoint uses sigmas logit_normal weighting. For shifted_sigmas logit_normal weighting, check https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas","{""id"": ""finetrainers/CogView4-6B-rider-waite-tarot-v0"", ""author"": ""finetrainers"", ""sha"": ""273991e0fb53d0e6b94d5899216854d2d6448a22"", ""last_modified"": ""2025-03-06 11:15:33+00:00"", ""created_at"": ""2025-03-06 11:07:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""diffusers-training"", ""template:sd-lora"", ""cogview4"", ""dataset:multimodalart/1920-raider-waite-tarot-public-domain"", ""base_model:THUDM/CogView4-6B"", ""base_model:finetune:THUDM/CogView4-6B"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- multimodalart/1920-raider-waite-tarot-public-domain\nlibrary_name: diffusers\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\nwidget:\n- text: TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and\n holding a staff, \\\""knight of wands\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227419.png\n- text: TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding\n a trophee, \\\""queen of cups\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741227417.png\n- text: TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins\n to two kneeling figures, surrounded by six pentacles\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227455.png"", ""widget_data"": [{""text"": ""TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and holding a staff, \\\""knight of wands\\\"""", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227419.png""}}, {""text"": ""TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding a trophee, \\\""queen of cups\\\"""", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741227417.png""}}, {""text"": ""TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins to two kneeling figures, surrounded by six pentacles"", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227455.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227419.png', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227456.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227467.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227490.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227455.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227467.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227490.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741227417.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_lora_weights.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-06 11:15:33+00:00"", ""cardData"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- multimodalart/1920-raider-waite-tarot-public-domain\nlibrary_name: diffusers\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\nwidget:\n- text: TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and\n holding a staff, \\\""knight of wands\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741227419.png\n- text: TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding\n a trophee, \\\""queen of cups\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741227417.png\n- text: TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins\n to two kneeling figures, surrounded by six pentacles\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741227455.png"", ""transformersInfo"": null, ""_id"": ""67c981dc29b1822577a561e0"", ""modelId"": ""finetrainers/CogView4-6B-rider-waite-tarot-v0"", ""usedStorage"": 126955231}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=finetrainers/CogView4-6B-rider-waite-tarot-v0&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfinetrainers%2FCogView4-6B-rider-waite-tarot-v0%5D(%2Ffinetrainers%2FCogView4-6B-rider-waite-tarot-v0)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 196 |
+
finetrainers/CogView4-6B-Edit-LoRA-v0,"---
|
| 197 |
+
base_model:
|
| 198 |
+
- THUDM/CogView4-6B
|
| 199 |
+
datasets:
|
| 200 |
+
- sayapaul/OmniEdit-mini
|
| 201 |
+
library_name: diffusers
|
| 202 |
+
widget:
|
| 203 |
+
- text: >-
|
| 204 |
+
Change it to look like it's in the style of an impasto painting.
|
| 205 |
+
output:
|
| 206 |
+
url: output1.png
|
| 207 |
+
- text: >-
|
| 208 |
+
change the setting to spring with blooming trees
|
| 209 |
+
output:
|
| 210 |
+
url: output2.png
|
| 211 |
+
- text: >-
|
| 212 |
+
transform the setting to a stormy space
|
| 213 |
+
output:
|
| 214 |
+
url: output3.png
|
| 215 |
+
tags:
|
| 216 |
+
- text-to-image
|
| 217 |
+
- diffusers-training
|
| 218 |
+
- diffusers
|
| 219 |
+
- template:sd-lora
|
| 220 |
+
- cogview4
|
| 221 |
+
- finetrainers
|
| 222 |
+
---
|
| 223 |
+
|
| 224 |
+
<Gallery />
|
| 225 |
+
|
| 226 |
+
This is a Control LoRA for making small edits to images with the [THUDM/CogView4-6B](https://huggingface.co/THUDM/CogView4-6B) model.
|
| 227 |
+
|
| 228 |
+
Code: https://github.com/a-r-r-o-w/finetrainers
|
| 229 |
+
|
| 230 |
+
> [!IMPORTANT]
|
| 231 |
+
> This is an experimental checkpoint and its poor generalization is well-known.
|
| 232 |
+
|
| 233 |
+
Inference code:
|
| 234 |
+
|
| 235 |
+
```python
# For now, must use this branch of finetrainers: https://github.com/a-r-r-o-w/finetrainers/blob/f3e27cc39a2bc804cb373ea15522576e57f46d23/finetrainers/models/cogview4/control_specification.py

import torch
from diffusers import CogView4Pipeline
from diffusers.utils import load_image
from finetrainers.models.utils import _expand_linear_with_zeroed_weights
from finetrainers.patches import load_lora_weights
from finetrainers.patches.dependencies.diffusers.control import control_channel_concat

dtype = torch.bfloat16
device = torch.device(""cuda"")
generator = torch.Generator().manual_seed(0)

pipe = CogView4Pipeline.from_pretrained(""THUDM/CogView4-6B"", torch_dtype=dtype)

in_channels = pipe.transformer.config.in_channels
patch_channels = pipe.transformer.patch_embed.proj.in_features
pipe.transformer.patch_embed.proj = _expand_linear_with_zeroed_weights(pipe.transformer.patch_embed.proj, new_in_features=2 * patch_channels)

load_lora_weights(pipe, ""finetrainers/CogView4-6B-Edit-LoRA-v0"", ""cogview4-lora"")
pipe.set_adapters(""cogview4-lora"", 0.9)
pipe.to(device)

prompt = ""Make the image look like it's from an ancient Egyptian mural.""
control_image = load_image(""examples/training/control/cogview4/omni_edit/validation_dataset/0.png"")
height, width = 1024, 1024

with torch.no_grad():
    latents = pipe.prepare_latents(1, in_channels, height, width, dtype, device, generator)
    control_image = pipe.image_processor.preprocess(control_image, height=height, width=width)
    control_image = control_image.to(device=device, dtype=dtype)
    control_latents = pipe.vae.encode(control_image).latent_dist.sample(generator=generator)
    control_latents = (control_latents - pipe.vae.config.shift_factor) * pipe.vae.config.scaling_factor

    with control_channel_concat(pipe.transformer, [""hidden_states""], [control_latents], dims=[1]):
        image = pipe(prompt, latents=latents, num_inference_steps=30, generator=generator).images[0]

image.save(""output.png"")
```
|
| 275 |
+
","{""id"": ""finetrainers/CogView4-6B-Edit-LoRA-v0"", ""author"": ""finetrainers"", ""sha"": ""b8822b373d656a3a6020b134724a629f99837e92"", ""last_modified"": ""2025-04-06 14:03:29+00:00"", ""created_at"": ""2025-04-06 13:37:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 20, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""diffusers-training"", ""template:sd-lora"", ""cogview4"", ""finetrainers"", ""dataset:sayapaul/OmniEdit-mini"", ""base_model:THUDM/CogView4-6B"", ""base_model:finetune:THUDM/CogView4-6B"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- sayapaul/OmniEdit-mini\nlibrary_name: diffusers\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\n- finetrainers\nwidget:\n- text: Change it to look like it's in the style of an impasto painting.\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output1.png\n- text: change the setting to spring with blooming trees\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output2.png\n- text: transform the setting to a stormy space\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output3.png"", ""widget_data"": [{""text"": ""Change it to look like it's in the style of an impasto painting."", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output1.png""}}, {""text"": ""change the setting to spring with blooming trees"", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output2.png""}}, {""text"": ""transform the setting to a stormy space"", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output3.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='output1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='output2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='output3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_lora_weights.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-06 14:03:29+00:00"", ""cardData"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- sayapaul/OmniEdit-mini\nlibrary_name: diffusers\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\n- finetrainers\nwidget:\n- text: Change it to look like it's in the style of an impasto painting.\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output1.png\n- text: change the setting to spring with blooming trees\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output2.png\n- text: transform the setting to a stormy space\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-Edit-LoRA-v0/resolve/main/output3.png"", ""transformersInfo"": null, 
""_id"": ""67f283a0761ff5af73749d2d"", ""modelId"": ""finetrainers/CogView4-6B-Edit-LoRA-v0"", ""usedStorage"": 1017265091}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=finetrainers/CogView4-6B-Edit-LoRA-v0&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfinetrainers%2FCogView4-6B-Edit-LoRA-v0%5D(%2Ffinetrainers%2FCogView4-6B-Edit-LoRA-v0)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 276 |
+
finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas,"---
|
| 277 |
+
base_model:
|
| 278 |
+
- THUDM/CogView4-6B
|
| 279 |
+
datasets:
|
| 280 |
+
- multimodalart/1920-raider-waite-tarot-public-domain
|
| 281 |
+
library_name: diffusers
|
| 282 |
+
license: other
|
| 283 |
+
license_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE
|
| 284 |
+
widget:
|
| 285 |
+
- text: >-
|
| 286 |
+
TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and holding a staff, \""knight of wands\""
|
| 287 |
+
output:
|
| 288 |
+
url: final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262717.173147.png
|
| 289 |
+
- text: >-
|
| 290 |
+
TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding a trophee, \""queen of cups\""
|
| 291 |
+
output:
|
| 292 |
+
url: final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741262717.2762468.png
|
| 293 |
+
- text: >-
|
| 294 |
+
TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins to two kneeling figures, surrounded by six pentacles
|
| 295 |
+
output:
|
| 296 |
+
url: final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262755.184284.png
|
| 297 |
+
tags:
|
| 298 |
+
- text-to-image
|
| 299 |
+
- diffusers-training
|
| 300 |
+
- diffusers
|
| 301 |
+
- template:sd-lora
|
| 302 |
+
- cogview4
|
| 303 |
+
---
|
| 304 |
+
|
| 305 |
+
<Gallery />
|
| 306 |
+
|
| 307 |
+
This is a LoRA fine-tune of the [THUDM/CogView4-6B](https://huggingface.co/THUDM/CogView4-6B) model.
|
| 308 |
+
|
| 309 |
+
Code: https://github.com/a-r-r-o-w/finetrainers
|
| 310 |
+
|
| 311 |
+
Inference code:
|
| 312 |
+
|
| 313 |
+
```python
|
| 314 |
+
import torch
|
| 315 |
+
from diffusers import CogView4Pipeline
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
pipe = CogView4Pipeline.from_pretrained(
|
| 319 |
+
""THUDM/CogView4-6B"", torch_dtype=torch.bfloat16
|
| 320 |
+
).to(""cuda"")
|
| 321 |
+
pipe.load_lora_weights(""finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas"", adapter_name=""cogview4-lora"")
|
| 322 |
+
pipe.set_adapters([""cogview4-lora""], [0.9])
|
| 323 |
+
|
| 324 |
+
image = pipe(""<my-awesome-prompt>"").images[0]
|
| 325 |
+
image.save(""output.png"")
|
| 326 |
+
```
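For reference, the widget prompts above show the style this LoRA was trained on: they begin with the trigger phrase `TRTCRD a trtcrd of ...`. Substituting one of them for the placeholder prompt (continuing from the snippet above; the output filename is arbitrary) looks like this:

```python
# Prompt taken from the widget examples above; continues from the pipeline set up earlier.
image = pipe('TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding a trophee').images[0]
image.save('queen_of_cups.png')
```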
|
| 327 |
+
|
| 328 |
+
Training logs are available on WandB [here](https://wandb.ai/aryanvs/finetrainers-cogview4).
|
| 329 |
+
|
| 330 |
+
NOTE: this checkpoint uses shifted_sigmas logit_normal weighting. For sigmas logit_normal weighting, check https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0","{""id"": ""finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas"", ""author"": ""finetrainers"", ""sha"": ""a68387c3f9226853e1c41667313f4dc8c4c1b332"", ""last_modified"": ""2025-03-07 14:10:43+00:00"", ""created_at"": ""2025-03-07 14:06:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 30, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""diffusers-training"", ""template:sd-lora"", ""cogview4"", ""dataset:multimodalart/1920-raider-waite-tarot-public-domain"", ""base_model:THUDM/CogView4-6B"", ""base_model:finetune:THUDM/CogView4-6B"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- multimodalart/1920-raider-waite-tarot-public-domain\nlibrary_name: diffusers\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\nwidget:\n- text: TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and\n holding a staff, \\\""knight of wands\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262717.173147.png\n- text: TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding\n a trophee, \\\""queen of cups\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741262717.2762468.png\n- text: TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins\n to two kneeling figures, surrounded by six pentacles\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262755.184284.png"", ""widget_data"": [{""text"": ""TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and holding a staff, \\\""knight of wands\\\"""", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262717.173147.png""}}, {""text"": ""TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding a trophee, \\\""queen of cups\\\"""", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741262717.2762468.png""}}, {""text"": ""TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins to two kneeling figures, surrounded by six pentacles"", ""output"": {""url"": ""https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262755.184284.png""}}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262717.173147.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262754.3666801.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262766.1843963.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262789.3196418.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262755.184284.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262767.0774777.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262790.309148.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741262717.2762468.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_lora_weights.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-07 14:10:43+00:00"", ""cardData"": ""base_model:\n- THUDM/CogView4-6B\ndatasets:\n- multimodalart/1920-raider-waite-tarot-public-domain\nlibrary_name: diffusers\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogView4-6B/blob/main/LICENSE\ntags:\n- text-to-image\n- diffusers-training\n- diffusers\n- template:sd-lora\n- cogview4\nwidget:\n- text: TRTCRD a trtcrd of a knight mounting a running horse wearing an armor and\n holding a staff, \\\""knight of wands\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-0-2-TRTCRD-a-trtcrd-of-a-knig-1741262717.173147.png\n- text: TRTCRD a trtcrd of a woman sitting on a throne, wearing a crown and holding\n a trophee, \\\""queen of cups\\\""\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-woma-1741262717.2762468.png\n- text: TRTCRD a trtcrd of a person in a red robe holding a scale and giving coins\n to two kneeling figures, surrounded by six pentacles\n output:\n url: https://huggingface.co/finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas/resolve/main/final-5000-1-2-TRTCRD-a-trtcrd-of-a-pers-1741262755.184284.png"", ""transformersInfo"": null, ""_id"": ""67cafd800a2a716f257f0211"", ""modelId"": ""finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas"", ""usedStorage"": 127103362}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=finetrainers/CogView4-6B-rider-waite-tarot-v0-shifted-sigmas&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfinetrainers%2FCogView4-6B-rider-waite-tarot-v0-shifted-sigmas%5D(%2Ffinetrainers%2FCogView4-6B-rider-waite-tarot-v0-shifted-sigmas)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
ControlNet-modules-safetensors_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv
ADDED
|
@@ -0,0 +1,4 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
webui/ControlNet-modules-safetensors,"This repository hosts pruned `.safetensors` modules of [ControlNet](https://huggingface.co/lllyasviel/ControlNet) by [lllyasviel](https://huggingface.co/lllyasviel) and [T2I-Adapters](https://huggingface.co/TencentARC/T2I-Adapter) by the [TencentARC Team](https://huggingface.co/TencentARC)
|
| 3 |
+
|
| 4 |
+
The modules are meant for [this extension for AUTOMATIC1111/stable-diffusion-webui](https://github.com/Mikubill/sd-webui-controlnet), but should work for different webuis too if they have it implemented. cheers!🥂","{""id"": ""webui/ControlNet-modules-safetensors"", ""author"": ""webui"", ""sha"": ""8148814d89be1b115ae02db98b440aa83b8c0d78"", ""last_modified"": ""2023-03-07 03:26:16+00:00"", ""created_at"": ""2023-02-14 20:52:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1436, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cldm_v15.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cldm_v21.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_canny-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_depth-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_hed-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_mlsd-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_normal-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_openpose-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_scribble-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_seg-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_canny-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_color-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_depth-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_keypose-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_openpose-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_seg-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_sketch-fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='t2iadapter_style-fp16.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""fffiloni/ControlNet-Video"", ""carloscar/stable-diffusion-webui-controlnet-docker"", ""abidlabs/ControlNet"", ""yuan2023/stable-diffusion-webui-controlnet-docker"", ""bobu5/SD-webui-controlnet-docker"", ""meowingamogus69/stable-diffusion-webui-controlnet-docker"", ""ysharma/ControlNet_Image_Comparison"", ""AIFILMS/ControlNet-Video"", ""SUPERSHANKY/ControlNet_Colab"", ""Yuhkil/AnythingV5NewUI"", ""protoxx91/stable-diffusion-webui-controlnet-docker"", ""nivere/ControlNet-Video"", ""baby123/sd"", ""michael135/dontalk"", ""xcgc/SD-webui-controlnet-docker"", ""LOUISACHEN/ControlNet-Video"", ""zixuan1313/ControlNet-Video"", ""maxsu/ControlNet_Demo"", ""sancho89sf/ControlNet-Video"", ""ismot/1702t5"", ""Datasculptor/ControlNetv15"", ""Shima111/Indian_slon"", 
""hugging-fellows/ControlNet-Video"", ""Shima111/India"", ""Joeythemonster/ControlNet-Video"", ""kiin/ai"", ""AhhhhCraaaap/AIOF"", ""Jonjon3030/ControlNet-Video"", ""Karumoon/TestCN001"", ""Shima111/Metaforce"", ""lee-t/ControlNet-Video"", ""AhhhhCraaaap/Buckles_SHoe"", ""cheezay/ControlNet"", ""evelynf/ControlNet2"", ""ysharma/ControlNetwithSlider"", ""Charlie-QwQ/ControlNet-with-other-models"", ""Harshveer/fine_diffusers"", ""Majorbzb/ControlNet"", ""BrayanAndres/ControlNet-Video"", ""donghuang/dh"", ""lixbox/SD-webui-controlnet-docker"", ""Fernando22/ControlNet-Video"", ""Fernando22/ControlNet-Video2"", ""Fernando22/ControlNet-Video3"", ""Omnibus-archive/ControlNet-Video"", ""chenxin1212/stable-diffusion-webui-controlnet-docker"", ""pinkqween/DiscordAI""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-03-07 03:26:16+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""63ebf4a1308bd1f7fcb9ccbc"", ""modelId"": ""webui/ControlNet-modules-safetensors"", ""usedStorage"": 7278912091}",0,,0,,0,,0,,0,"AIFILMS/ControlNet-Video, SUPERSHANKY/ControlNet_Colab, Yuhkil/AnythingV5NewUI, abidlabs/ControlNet, bobu5/SD-webui-controlnet-docker, carloscar/stable-diffusion-webui-controlnet-docker, fffiloni/ControlNet-Video, hugging-fellows/ControlNet-Video, huggingface/InferenceSupport/discussions/463, meowingamogus69/stable-diffusion-webui-controlnet-docker, pinkqween/DiscordAI, ysharma/ControlNet_Image_Comparison, yuan2023/stable-diffusion-webui-controlnet-docker",13
|
CrisperWhisper_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,225 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nyrahealth/CrisperWhisper,"---
|
| 3 |
+
license: cc-by-nc-4.0
|
| 4 |
+
language:
|
| 5 |
+
- de
|
| 6 |
+
- en
|
| 7 |
+
base_model: openai/whisper-large-v3
|
| 8 |
+
metrics:
|
| 9 |
+
- cer
|
| 10 |
+
- wer
|
| 11 |
+
pipeline_tag: automatic-speech-recognition
|
| 12 |
+
library_name: transformers
|
| 13 |
+
---
|
| 14 |
+
# CrisperWhisper
|
| 15 |
+
|
| 16 |
+
**CrisperWhisper** is an advanced variant of OpenAI's Whisper, designed for fast, precise, and verbatim speech recognition with accurate (**crisp**) word-level timestamps. Unlike the original Whisper, which tends to omit disfluencies and follows more of an intended transcription style, CrisperWhisper aims to transcribe every spoken word exactly as it is, including fillers, pauses, stutters, and false starts. Check out our repo for more details: https://github.com/nyrahealth/CrisperWhisper
|
| 17 |
+
|
| 18 |
+
## Key Features
|
| 19 |
+
|
| 20 |
+
- 🎯 **Accurate Word-Level Timestamps**: Provides precise timestamps, even around disfluencies and pauses, by utilizing an adjusted tokenizer and a custom attention loss during training.
|
| 21 |
+
- 📝 **Verbatim Transcription**: Transcribes every spoken word exactly as it is, including and differentiating fillers like ""um"" and ""uh"".
|
| 22 |
+
- 🔍 **Filler Detection**: Detects and accurately transcribes fillers.
|
| 23 |
+
- 🛡️ **Hallucination Mitigation**: Minimizes transcription hallucinations to enhance accuracy.
|
| 24 |
+
|
| 25 |
+
## Table of Contents
|
| 26 |
+
|
| 27 |
+
- [Key Features](#key-features)
|
| 28 |
+
- [Highlights](#highlights)
|
| 29 |
+
- [Performance Overview](#1-performance-overview)
|
| 30 |
+
- [Qualitative Performance Overview](#11-qualitative-performance-overview)
|
| 31 |
+
- [Quantitative Performance Overview](#12-quantitative-performance-overview)
|
| 32 |
+
- [Transcription Performance](#transcription-performance)
|
| 33 |
+
- [Segmentation Performance](#segmentation-performance)
|
| 34 |
+
- [Usage](#2-usage)
|
| 35 |
+
- [with transformers](#21-usage-with-🤗-transformers)
|
| 36 |
+
- [How?](#3-How?)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
## Highlights
|
| 40 |
+
|
| 41 |
+
- 🏆 **1st place** on the [OpenASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) in verbatim datasets (TED, AMI)
|
| 42 |
+
- 🎓 **Accepted at INTERSPEECH 2024**.
|
| 43 |
+
- 📄 **Paper Drop**: Check out our [paper](https://arxiv.org/abs/2408.16589) for details and reasoning behind our tokenizer adjustment.
|
| 44 |
+
- ✨ **New Feature**: Not mentioned in the paper is an added attention loss that further improves timestamp accuracy. By adding a loss that specifically trains the attention scores used for the DTW alignment on timestamped data, we significantly boosted the alignment performance.
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
## 1. Performance Overview
|
| 49 |
+
|
| 50 |
+
### 1.1 Qualitative Performance Overview
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
| Audio | Whisper Large V3 | Crisper Whisper |
|
| 54 |
+
|-------|------------------------|------------------------|
|
| 55 |
+
| [Demo de 1](https://github.com/user-attachments/assets/c8608ca8-5e02-4c4a-afd3-8f7c5bff75d5) | Er war kein Genie, aber doch ein fähiger Ingenieur. | Es ist zwar kein. Er ist zwar kein Genie, aber doch ein fähiger Ingenieur.|
|
| 56 |
+
| [Demo de 2](https://github.com/user-attachments/assets/c68414b1-0f84-441c-b39b-29069487edb6) | Leider müssen wir in diesen schweren Zeiten auch unserem Tagesgeschäft nachgehen. Der hier vorgelegte Kulturhaushalt der Ampelregierung strebt an, den Erfolgskurs der Union zumindest fiskalisch fortzuführen. | Leider [UH] müssen wir in diesen [UH] schweren Zeiten auch [UH] unserem [UH] Tagesgeschäft nachgehen. Der hier [UH] vorgelegte [UH] Kulturhaushalt der [UH] Ampelregierung strebt an, den [UH] Erfolgskurs der Union [UH] zumindest [UH] fiskalisch fortzuführen. Es. |
|
| 57 |
+
| [Demo de 3](https://github.com/user-attachments/assets/0c1ed60c-2829-47e4-b7ba-eb584b0a5e9a) | die über alle FRA-Fraktionen hinweg gut im Blick behalten sollten, auch weil sie teilweise sehr teeteuer sind. Aber nicht nur, weil sie teeteuer sind. Wir steigen mit diesem Endentwurf ein in die sogenannten Pandemie-Bereitschaftsverträge.| Die über alle Fr Fraktionen hinweg gut im [UH] Blick behalten sollten, auch weil sie teil teilweise sehr te teuer sind. Aber nicht nur, weil sie te teuer sind. Wir [UH] steigen mit diesem Ent Entwurf ein in die sogenannten Pand Pandemiebereitschaftsverträge. |
|
| 58 |
+
| [Demo en 1](https://github.com/user-attachments/assets/cde5d69c-657f-4ae4-b4ae-b958ea2eacc5) | alternative is you can get like, you have those Dr. Bronner's| Alternative is you can get like [UH] you have those, you know, those doctor Brahmer's. |
|
| 59 |
+
| [Demo en 2](https://github.com/user-attachments/assets/906e307d-5613-4c41-9c61-65f4beede1fd) | influence our natural surrounding? How does it influence our ecosystem? | Influence our [UM] our [UH] our natural surrounding. How does it influence our ecosystem? |
|
| 60 |
+
| [Demo en 3](https://github.com/user-attachments/assets/6c09cd58-a574-4697-9a7e-92e416cf2522) | and always find a place on the street to park and it was easy and you weren't a long distance away from wherever it was that you were trying to go. So I remember that being a lot of fun and easy to do and there were nice places to go and good events to attend. Come downtown and you had the Warner Theater and | And always find a place on the street to park. And and it was it was easy and you weren't a long distance away from wherever it was that you were trying to go. So, I I I remember that being a lot of fun and easy to do and there were nice places to go and, [UM] i good events to attend. Come downtown and you had the Warner Theater and, [UM] |
|
| 61 |
+
| [Demo en 4](https://github.com/user-attachments/assets/7df19486-5e4e-4443-8528-09b07dddf61a) | you know, more masculine, who were rough, and that definitely wasn't me. Then, you know, I was very smart because my father made sure I was smart, you know. So, you know, I hung around those people, you know. And then you had the ones that were just out doing things that they shouldn't have been doing also. So, yeah, I was in the little geek squad. You were in the little geek squad. Yeah. | you know, more masculine, who were rough, and that definitely wasn't me. Then, you know, I was very smart because my father made sure I was smart. You know, so, [UM] you know, I I hung around those people, you know. And then you had the ones that were just just out doing things that they shouldn't have been doing also. So yeah, I was the l I was in the little geek squad. Do you |
|
| 62 |
+
|
| 63 |
+
### 1.2 Quantitative Performance Overview
|
| 64 |
+
|
| 65 |
+
#### Transcription Performance
|
| 66 |
+
|
| 67 |
+
CrisperWhisper significantly outperforms Whisper Large v3, especially on datasets that have a more verbatim transcription style in the ground truth, such as AMI and TED-LIUM.
|
| 68 |
+
|
| 69 |
+
| Dataset | CrisperWhisper | Whisper Large v3 |
|
| 70 |
+
|----------------------|:--------------:|:----------------:|
|
| 71 |
+
| [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | **8.72** | 16.01 |
|
| 72 |
+
| [Earnings22](https://huggingface.co/datasets/revdotcom/earnings22) | 12.37 | **11.3** |
|
| 73 |
+
| [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | 10.27 | **10.02** |
|
| 74 |
+
| [LibriSpeech clean](https://huggingface.co/datasets/openslr/librispeech_asr) | **1.74** | 2.03 |
|
| 75 |
+
| [LibriSpeech other](https://huggingface.co/datasets/openslr/librispeech_asr) | 3.97 | **3.91** |
|
| 76 |
+
| [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech) | **2.71** | 2.95 |
|
| 77 |
+
| [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | **3.35** | 3.9 |
|
| 78 |
+
| [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | **8.61** | 9.52 |
|
| 79 |
+
| [CommonVoice](https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0) | **8.19** | 9.67 |
|
| 80 |
+
| **Average WER** | **6.66** | 7.7 |
|
| 81 |
+
|
| 82 |
+
#### Segmentation Performance
|
| 83 |
+
|
| 84 |
+
CrisperWhisper demonstrates superior segmentation performance. This performance gap is especially pronounced around disfluencies and pauses.
|
| 85 |
+
The following table uses the metrics as defined in the paper. For this table we used a collar of 50 ms. Heads for each model were selected using the method described in the [How](#3-how) section, and the result attaining the highest F1 score was chosen for each model across varying numbers of heads.
|
| 86 |
+
|
| 87 |
+
| Dataset | Metric | CrisperWhisper | Whisper Large v2 | Whisper Large v3 |
|
| 88 |
+
|---------|--------|------------------|------------------|------------------|
|
| 89 |
+
| [AMI IHM](https://groups.inf.ed.ac.uk/ami/corpus/) | F1 Score | **0.79** | 0.63 | 0.66 |
|
| 90 |
+
| | Avg IOU | **0.67** | 0.54 | 0.53 |
|
| 91 |
+
| [Common Voice](https://commonvoice.mozilla.org/en/datasets) | F1 Score | **0.80** | 0.42 | 0.48 |
|
| 92 |
+
| | Avg IOU | **0.70** | 0.32 | 0.43 |
|
| 93 |
+
| [TIMIT](https://catalog.ldc.upenn.edu/LDC93S1) | F1 Score | **0.69** | 0.40 | 0.54 |
|
| 94 |
+
| | Avg IOU | **0.56** | 0.32 | 0.43 |
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
## 2. Usage
|
| 98 |
+
|
| 99 |
+
Here's how to use CrisperWhisper in your Python scripts:
|
| 100 |
+
|
| 101 |
+
First install our custom transformers fork for the most accurate timestamps:
|
| 102 |
+
```
|
| 103 |
+
pip install git+https://github.com/nyrahealth/transformers.git@crisper_whisper
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
### 2.1 Usage with 🤗 transformers
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
```python
|
| 110 |
+
|
| 112 |
+
import torch
|
| 113 |
+
|
| 114 |
+
from datasets import load_dataset
|
| 115 |
+
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
|
| 116 |
+
|
| 117 |
+
def adjust_pauses_for_hf_pipeline_output(pipeline_output, split_threshold=0.12):
|
| 118 |
+
""""""
|
| 119 |
+
Adjust pause timings by distributing pauses up to the threshold evenly between adjacent words.
|
| 120 |
+
""""""
|
| 121 |
+
|
| 122 |
+
adjusted_chunks = pipeline_output[""chunks""].copy()
|
| 123 |
+
|
| 124 |
+
for i in range(len(adjusted_chunks) - 1):
|
| 125 |
+
current_chunk = adjusted_chunks[i]
|
| 126 |
+
next_chunk = adjusted_chunks[i + 1]
|
| 127 |
+
|
| 128 |
+
current_start, current_end = current_chunk[""timestamp""]
|
| 129 |
+
next_start, next_end = next_chunk[""timestamp""]
|
| 130 |
+
pause_duration = next_start - current_end
|
| 131 |
+
|
| 132 |
+
if pause_duration > 0:
|
| 133 |
+
if pause_duration > split_threshold:
|
| 134 |
+
distribute = split_threshold / 2
|
| 135 |
+
else:
|
| 136 |
+
distribute = pause_duration / 2
|
| 137 |
+
|
| 138 |
+
# Adjust current chunk end time
|
| 139 |
+
adjusted_chunks[i][""timestamp""] = (current_start, current_end + distribute)
|
| 140 |
+
|
| 141 |
+
# Adjust next chunk start time
|
| 142 |
+
adjusted_chunks[i + 1][""timestamp""] = (next_start - distribute, next_end)
|
| 143 |
+
pipeline_output[""chunks""] = adjusted_chunks
|
| 144 |
+
|
| 145 |
+
return pipeline_output
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
device = ""cuda:0"" if torch.cuda.is_available() else ""cpu""
|
| 149 |
+
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 150 |
+
|
| 151 |
+
model_id = ""nyrahealth/CrisperWhisper""
|
| 152 |
+
|
| 153 |
+
model = AutoModelForSpeechSeq2Seq.from_pretrained(
|
| 154 |
+
model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
|
| 155 |
+
)
|
| 156 |
+
model.to(device)
|
| 157 |
+
|
| 158 |
+
processor = AutoProcessor.from_pretrained(model_id)
|
| 159 |
+
|
| 160 |
+
pipe = pipeline(
|
| 161 |
+
""automatic-speech-recognition"",
|
| 162 |
+
model=model,
|
| 163 |
+
tokenizer=processor.tokenizer,
|
| 164 |
+
feature_extractor=processor.feature_extractor,
|
| 165 |
+
chunk_length_s=30,
|
| 166 |
+
batch_size=16,
|
| 167 |
+
return_timestamps='word',
|
| 168 |
+
torch_dtype=torch_dtype,
|
| 169 |
+
device=device,
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
dataset = load_dataset(""distil-whisper/librispeech_long"", ""clean"", split=""validation"")
|
| 173 |
+
sample = dataset[0][""audio""]
|
| 174 |
+
hf_pipeline_output = pipe(sample)
|
| 175 |
+
crisper_whisper_result = adjust_pauses_for_hf_pipeline_output(hf_pipeline_output)
|
| 176 |
+
print(crisper_whisper_result)
|
| 177 |
+
```
|
| 178 |
+
|
| 179 |
+
Read more about the reasoning behind the pause distribution logic in our [paper](https://arxiv.org/abs/2408.16589).
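For orientation, the `adjust_pauses_for_hf_pipeline_output` helper above assumes the usual word-level output shape of the 🤗 pipeline, roughly like this (the words and timestamps here are illustrative, not real output):

```python
# Illustrative shape only: what adjust_pauses_for_hf_pipeline_output expects.
hf_pipeline_output = {
    'text': ' um so I think',
    'chunks': [
        {'text': ' um', 'timestamp': (0.00, 0.35)},
        {'text': ' so', 'timestamp': (0.62, 0.81)},
        {'text': ' I', 'timestamp': (0.81, 0.90)},
        {'text': ' think', 'timestamp': (0.94, 1.20)},
    ],
}
```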
|
| 180 |
+
|
| 181 |
+
## 3. How?
|
| 182 |
+
|
| 183 |
+
We employ the popular Dynamic Time Warping (DTW) on the Whisper cross-attention scores, as detailed in our [paper](https://arxiv.org/abs/2408.16589) to derive word-level timestamps. By leveraging our retokenization process, this method allows us to consistently detect pauses. Given that the accuracy of the timestamps heavily depends on the DTW cost matrix and, consequently, on the quality of the cross-attentions, we developed a specialized loss function for the selected alignment heads to enhance precision.
|
| 184 |
+
|
| 185 |
+
Although this loss function was not included in the original [paper](https://arxiv.org/abs/2408.16589) due to time constraints preventing the completion of experiments and training before the submission deadline, it has been used to train our publicly available models.
|
| 186 |
+
Key Features of this loss are as follows:
|
| 187 |
+
|
| 188 |
+
1. **Data Preparation**
|
| 189 |
+
- We used datasets with word-level timestamp annotations, such as [AMI IHM](https://groups.inf.ed.ac.uk/ami/corpus/) and [TIMIT](https://catalog.ldc.upenn.edu/LDC93S1), but required additional timestamped data.
|
| 190 |
+
- To address this, we validated the alignment accuracy of several forced alignment tools using a small hand-labeled dataset.
|
| 191 |
+
- Based on this validation, we chose the [PyTorch CTC aligner](https://pytorch.org/audio/main/tutorials/ctc_forced_alignment_api_tutorial.html) to generate more time-aligned data from the CommonVoice dataset.
|
| 192 |
+
- Because the [PyTorch CTC aligner](https://pytorch.org/audio/main/tutorials/ctc_forced_alignment_api_tutorial.html) tends to overestimate pause durations, we applied the same pause-splitting method detailed in our [paper](https://arxiv.org/abs/2408.16589) to correct these errors. The effectiveness of this correction was confirmed using our hand-labeled dataset.
|
| 193 |
+
|
| 194 |
+
2. **Token-Word Alignment**
|
| 195 |
+
- Due to retokenization as detailed in our [paper](https://arxiv.org/abs/2408.16589), each token is either part of a word or a pause/space, but never both
|
| 196 |
+
- Therefore each token can be cleanly aligned to a word OR a space/pause
|
| 197 |
+
|
| 198 |
+
3. **Ground Truth Cross-Attention**
|
| 199 |
+
- We define the cross-attention ground truth for tokens as the L2-normalized vector, where:
|
| 200 |
+
- A value of 1 indicates that the word is active according to the word-level ground truth timestamp.
|
| 201 |
+
- A value of 0 indicates that no attention should be paid.
|
| 202 |
+
- To account for small inaccuracies in the ground truth timestamps, we apply a linear interpolation of 4 steps (8 milliseconds) on both sides of the ground truth vector, transitioning smoothly from 0 to 1.
|
| 203 |
+
|
| 204 |
+
4. **Loss Calculation**
|
| 205 |
+
- The loss function is defined as `1 - cosine similarity` between the predicted cross-attention vector (when predicting a token) and the ground truth cross-attention vector; a minimal sketch of this computation is shown after this list.
|
| 206 |
+
- This loss is averaged across all predicted tokens and alignment heads.
|
| 207 |
+
|
| 208 |
+
5. **Alignment Head selection**
|
| 209 |
+
- To choose the heads for alignment, we evaluated the alignment performance of each individual decoder attention head on the timestamped TIMIT dataset.
|
| 210 |
+
- We choose the 15 best-performing heads and fine-tune them using our attention loss.
|
| 211 |
+
|
| 212 |
+
6. **Training Details**
|
| 213 |
+
- Since most of our samples during training were shorter than 30 seconds, we shift the audio sample and the corresponding timestamp ground truth around with a 50% probability to mitigate the cross-attentions ""overfitting"" to early positions of the encoder output.
|
| 214 |
+
- If we have more than 40 ms of silence (before or after shifting), we prepend the ground truth transcript (and the corresponding cross-attention ground truth) with a space so the model has to accurately predict the starting time of the first word.
|
| 215 |
+
- We use [WavLM](https://arxiv.org/abs/2110.13900) augmentations during training, adding random speech samples or noise to the audio waveform, to generally increase the robustness of the transcription and the stability of the alignment heads.
|
| 216 |
+
- We clip ""predicted"" values in the cross-attention vectors to 0 when they lie more than 4 seconds before or 4 seconds after the ground-truth word they belong to. This decreases the dimensionality of the cross-attention vector and therefore emphasizes the attention where it counts, both in the loss and ultimately for the alignment.
|
| 217 |
+
- With a probability of 1%, we use samples containing exclusively noise, where the model has to return an empty prediction, to reduce hallucinations.
|
| 218 |
+
- The model is trained on a mixture of English and German datasets, so we only guarantee good performance on these languages.
|
| 219 |
+
- The model is trained in three stages: in the first stage, we use around 10,000 hours of audio to adjust Whisper to the new tokenizer. In the second stage, we exclusively use high-quality datasets that are transcribed in a verbatim fashion. Finally, we continue training on this verbatim mixture and add the attention loss for another 6000 steps.
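Below is a minimal sketch of the loss described in points 3 and 4, assuming a per-token ground-truth activation vector built from the word-level timestamps; the tensor shapes and frame rate are illustrative and not taken from the actual training code.

```python
import torch
import torch.nn.functional as F

def attention_alignment_loss(pred_attn, gt_attn):
    # pred_attn, gt_attn: (num_tokens, num_audio_frames) for one alignment head.
    # gt_attn is 1 where the token's word is active (with a short linear ramp at
    # the boundaries, see point 3) and 0 elsewhere, then L2-normalized per token.
    gt_attn = F.normalize(gt_attn, p=2, dim=-1)
    pred_attn = F.normalize(pred_attn, p=2, dim=-1)
    cosine = (pred_attn * gt_attn).sum(dim=-1)
    # 1 - cosine similarity, averaged over tokens (and, in practice, over heads).
    return (1.0 - cosine).mean()
```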
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
## License
|
| 223 |
+
---
|
| 224 |
+
license: cc-by-nc-4.0
|
| 225 |
+
---","{""id"": ""nyrahealth/CrisperWhisper"", ""author"": ""nyrahealth"", ""sha"": ""7aefea4c6c009ea7c47e6ab79247dfaf73d4c518"", ""last_modified"": ""2024-12-19 11:31:55+00:00"", ""created_at"": ""2024-08-29 15:53:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10077, ""downloads_all_time"": null, ""likes"": 272, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""whisper"", ""automatic-speech-recognition"", ""de"", ""en"", ""arxiv:2408.16589"", ""arxiv:2110.13900"", ""base_model:openai/whisper-large-v3"", ""base_model:finetune:openai/whisper-large-v3"", ""license:cc-by-nc-4.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""automatic-speech-recognition"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: openai/whisper-large-v3\nlanguage:\n- de\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\nmetrics:\n- cer\n- wer\npipeline_tag: automatic-speech-recognition"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""WhisperForConditionalGeneration""], ""model_type"": ""whisper"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSpeechSeq2Seq"", ""custom_class"": null, ""pipeline_tag"": ""automatic-speech-recognition"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='normalizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""rafaaa2105/subtitles-translation"", ""Conexion/nyrahealth-CrisperWhisper"", ""adil9858/nyrahealth-CrisperWhisper"", ""rafaaa2105/crisper-whisper"", ""onlinework/nyrahealth-CrisperWhisper"", ""AlDracu/nyrahealth-CrisperWhisper"", ""Vovan4eg/nyrahealth-CrisperWhisper"", ""on1onmangoes/heyzzk241211v1""], ""safetensors"": {""parameters"": {""F16"": 1609879040}, ""total"": 1609879040}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-19 11:31:55+00:00"", ""cardData"": ""base_model: openai/whisper-large-v3\nlanguage:\n- de\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\nmetrics:\n- cer\n- 
wer\npipeline_tag: automatic-speech-recognition"", ""transformersInfo"": {""auto_model"": ""AutoModelForSpeechSeq2Seq"", ""custom_class"": null, ""pipeline_tag"": ""automatic-speech-recognition"", ""processor"": ""AutoProcessor""}, ""_id"": ""66d099665a5139a40a85b568"", ""modelId"": ""nyrahealth/CrisperWhisper"", ""usedStorage"": 7971358108}",0,,0,https://huggingface.co/miosipof/asr2_medium_CRSPR_v0.5,1,,0,,0,"AlDracu/nyrahealth-CrisperWhisper, Conexion/nyrahealth-CrisperWhisper, Vovan4eg/nyrahealth-CrisperWhisper, adil9858/nyrahealth-CrisperWhisper, hf-audio/open_asr_leaderboard, huggingface/InferenceSupport/discussions/new?title=nyrahealth/CrisperWhisper&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnyrahealth%2FCrisperWhisper%5D(%2Fnyrahealth%2FCrisperWhisper)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, on1onmangoes/heyzzk241211v1, onlinework/nyrahealth-CrisperWhisper, rafaaa2105/crisper-whisper, rafaaa2105/subtitles-translation",10
|
Cyberpunk-Anime-Diffusion_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,122 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
DGSpitzer/Cyberpunk-Anime-Diffusion,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
thumbnail: ""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png""
|
| 6 |
+
tags:
|
| 7 |
+
- cyberpunk
|
| 8 |
+
- anime
|
| 9 |
+
- waifu-diffusion
|
| 10 |
+
- stable-diffusion
|
| 11 |
+
- aiart
|
| 12 |
+
- text-to-image
|
| 13 |
+
license: creativeml-openrail-m
|
| 14 |
+
---
|
| 15 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/5.jpg"" width=""512"" height=""512""/></center>
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+
# Cyberpunk Anime Diffusion
|
| 20 |
+
|
| 21 |
+
An AI model that generates cyberpunk anime characters!~
|
| 22 |
+
|
| 23 |
+
Based on a fine-tuned Waifu Diffusion V1.3 model with the Stable Diffusion V1.5 new VAE, trained with Dreambooth
|
| 24 |
+
|
| 25 |
+
by [DGSpitzer](https://www.youtube.com/channel/UCzzsYBF4qwtMwJaPJZ5SuPg)
|
| 26 |
+
|
| 27 |
+
### 🧨 Diffusers
|
| 28 |
+
|
| 29 |
+
This repo contains both .ckpt and Diffusers model files. It can be used like any other Stable Diffusion model, via the standard [Stable Diffusion Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
|
| 30 |
+
|
| 31 |
+
You can convert this model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](https://huggingface.co/blog/stable_diffusion_jax).
|
| 32 |
+
|
| 33 |
+
Example of loading the model with Diffusers:

```python
|
| 34 |
+
#!pip install diffusers transformers scipy torch
|
| 35 |
+
from diffusers import StableDiffusionPipeline
|
| 36 |
+
import torch
|
| 37 |
+
|
| 38 |
+
model_id = ""DGSpitzer/Cyberpunk-Anime-Diffusion""
|
| 39 |
+
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 40 |
+
pipe = pipe.to(""cuda"")
|
| 41 |
+
|
| 42 |
+
prompt = ""a beautiful perfect face girl in dgs illustration style, Anime fine details portrait of school girl in front of modern tokyo city landscape on the background deep bokeh, anime masterpiece, 8k, sharp high quality anime""
|
| 43 |
+
image = pipe(prompt).images[0]
|
| 44 |
+
|
| 45 |
+
image.save(""./cyberpunk_girl.png"")
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
# Online Demo
|
| 49 |
+
|
| 50 |
+
You can try the online Web UI demo built with [Gradio](https://github.com/gradio-app/gradio), or use the Colab Notebook here:
|
| 51 |
+
|
| 52 |
+
*My Online Space Demo*
|
| 53 |
+
[](https://huggingface.co/spaces/DGSpitzer/DGS-Diffusion-Space)
|
| 54 |
+
|
| 55 |
+
*Finetuned Diffusion WebUI Demo by anzorq*
|
| 56 |
+
[](https://huggingface.co/spaces/anzorq/finetuned_diffusion)
|
| 57 |
+
|
| 58 |
+
*Colab Notebook*
|
| 59 |
+
[](https://colab.research.google.com/github/HelixNGC7293/cyberpunk-anime-diffusion/blob/main/cyberpunk_anime_diffusion.ipynb)[](https://github.com/HelixNGC7293/cyberpunk-anime-diffusion)
|
| 60 |
+
|
| 61 |
+
*Buy me a coffee if you like this project ;P ♥*
|
| 62 |
+
[](https://www.buymeacoffee.com/dgspitzer)
|
| 63 |
+
|
| 64 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/1.jpg"" width=""512"" height=""512""/></center>
|
| 65 |
+
|
| 66 |
+
# **👇Model👇**
|
| 67 |
+
|
| 68 |
+
AI Model Weights available at huggingface: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion
|
| 69 |
+
|
| 70 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/2.jpg"" width=""512"" height=""512""/></center>
|
| 71 |
+
|
| 72 |
+
# Usage
|
| 73 |
+
|
| 74 |
+
After the model is loaded, use the keyword **dgs** in your prompt, together with **illustration style**, to get even better results.
|
| 75 |
+
|
| 76 |
+
For the sampler, use **Euler A** for the best results (**DDIM** kind of works too); CFG Scale 7 and 20 steps should be fine. A Diffusers sketch reproducing these settings follows the WebUI example below.
|
| 77 |
+
|
| 78 |
+
**Example 1:**
|
| 79 |
+
|
| 80 |
+
```
|
| 81 |
+
portrait of a girl in dgs illustration style, Anime girl, female soldier working in a cyberpunk city, cleavage, ((perfect femine face)), intricate, 8k, highly detailed, shy, digital painting, intense, sharp focus
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
For a male cyber-robot character, you can add **muscular male** to improve the output.
|
| 85 |
+
|
| 86 |
+
**Example 2:**
|
| 87 |
+
|
| 88 |
+
```
|
| 89 |
+
a photo of muscular beard soldier male in dgs illustration style, half-body, holding robot arms, strong chest
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
**Example 3 (with Stable Diffusion WebUI):**
|
| 93 |
+
|
| 94 |
+
If using [AUTOMATIC1111's Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
|
| 95 |
+
|
| 96 |
+
You can simply use this as the **prompt** with the **Euler A** sampler, CFG Scale 7, 20 steps, and a 704 x 704 px output resolution:
|
| 97 |
+
|
| 98 |
+
```
|
| 99 |
+
an anime girl in dgs illustration style
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
And set the **negative prompt** to this to get a cleaner face:
|
| 103 |
+
|
| 104 |
+
```
|
| 105 |
+
out of focus, scary, creepy, evil, disfigured, missing limbs, ugly, gross, missing fingers
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
This will give you exactly the same style as the sample images above.
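If you are using the 🧨 Diffusers pipeline instead of the WebUI, a minimal sketch of the same settings (Euler A sampler, CFG Scale 7, 20 steps, 704 x 704, and the negative prompt above) could look like this; `EulerAncestralDiscreteScheduler` is the Diffusers counterpart of the WebUI's Euler A sampler:

```python
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    'DGSpitzer/Cyberpunk-Anime-Diffusion', torch_dtype=torch.float16
).to('cuda')
# Swap in the Euler Ancestral scheduler (Euler A in the WebUI).
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    'an anime girl in dgs illustration style',
    negative_prompt='out of focus, scary, creepy, evil, disfigured, missing limbs, ugly, gross, missing fingers',
    guidance_scale=7,        # CFG Scale 7
    num_inference_steps=20,  # 20 steps
    width=704,
    height=704,
).images[0]
image.save('cyberpunk_anime_girl.png')
```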
|
| 109 |
+
|
| 110 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/ReadmeAddon.jpg"" width=""256"" height=""353""/></center>
|
| 111 |
+
|
| 112 |
+
---
|
| 113 |
+
|
| 114 |
+
**NOTE: usage of this model implies acceptance of Stable Diffusion's [CreativeML Open RAIL-M license](LICENSE)**
|
| 115 |
+
|
| 116 |
+
---
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/4.jpg"" width=""700"" height=""700""/></center>
|
| 120 |
+
|
| 121 |
+
<center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/6.jpg"" width=""700"" height=""700""/></center>
|
| 122 |
+
","{""id"": ""DGSpitzer/Cyberpunk-Anime-Diffusion"", ""author"": ""DGSpitzer"", ""sha"": ""2b6407002b73374e6864d3647f4eb9659bca36a9"", ""last_modified"": ""2023-06-21 20:44:20+00:00"", ""created_at"": ""2022-10-27 17:02:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 617, ""downloads_all_time"": null, ""likes"": 546, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""cyberpunk"", ""anime"", ""waifu-diffusion"", ""stable-diffusion"", ""aiart"", ""text-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- cyberpunk\n- anime\n- waifu-diffusion\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cyberpunk-Anime-Diffusion.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cyberpunk-Anime-Diffusion.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cyberpunk_anime_diffusion.ipynb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/4.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/5.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/6.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/ReadmeAddon.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/thumbnail.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder/pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""anzorq/finetuned_diffusion"", ""darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui"", ""Yntec/ToyWorld"", ""darkstorm2150/protogen-web-ui"", ""Yntec/PrintingPress"", ""vorstcavry/ai"", ""kamiyamai/stable-diffusion-webui"", ""DGSpitzer/DGS-Diffusion-Space"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""BilalSardar/Text-To-image-AllModels"", ""AdamOswald1/finetuned_diffusion"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""IAmXenos21/stable-diffusion-webui-VORST2"", ""John6666/PrintingPress4"", ""Nickhilearla135095/maximum_diffusion"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""AlStable/AlPrompt"", ""Rifd/ngees_doang"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Joeythemonster/Text-To-image-AllModels"", ""Evel/Evel_Space"", ""luisrguerra/sd-real-dream-lcm-cpu"", ""Daniela-C/6x_Image_diffusion"", ""akhaliq/webui-orangemixs"", ""Dao3/Text-To-image-AllModels"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""ConceptArtHouse/webui-gameasset"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""YeOldHermit/StableDiffusion_AnythingV3_ModelCamenduru"", ""zwv9/webui-cpu"", ""kaleidoskop-hug/PrintingPress"", ""Adam111/stable-diffusion-webui"", ""vs4vijay/stable-diffusion"", ""Yasu55/stable-diffusion-webui"", ""ennov8ion/stablediffusion-models"", ""Shocky/Pink-Anime"", ""ReiPlush64/finetuned_diffusion"", ""John6666/ToyWorld4"", ""sasaro/webui"", ""Omnibus-archive/Diffusion-Flood"", ""Crossper6/stable-diffusion-webui"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""Malifex/CPU-Anything-V3.0-WebUI"", ""lianzhou/stable-diffusion-webui"", ""Missinginaction/stablediffusionwithnofilter"", ""arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", ""thestasi/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", 
""achyuth1344/stable-diffusion-webui"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""Minecraft3193092/Stable-Diffusion-8"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""deaf1296/finetuned_diffusion"", ""pieeetre/stable-diffusion-webui"", ""luluneko1/stable-diffusion-webui"", ""Lyra121/finetuned_diffusion"", ""voltcutter/stable-diffusion-webui"", ""hylee/finetuned_diffusion"", ""RhythmRemix14/PrintingPressDx"", ""Minecraft3193092/Stable-Diffusion-7"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""AiiluoChen/webui"", ""Heckeroo/Cyberpunk-Anime-Diffusion"", ""Eduger/webui"", ""bobathetheft/webui"", ""natvill/stable-diffusion-webui"", ""Danielito/webui"", ""Eyeszik/webui"", ""YuraM/Stable-Diffusion-Protogen-webui"", ""TheFellow42/webui"", ""OswaldDev/webuih"", ""trhacknon/webui"", ""Harshveer/Finetuned_Diffusion_Max"", ""gato001k1/maximum_diffusion0k"", ""rubberboy/stable-diffusion-webui"", ""hilmyblaze/WebUI-Counterfeit-V2.5""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-06-21 20:44:20+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- cyberpunk\n- anime\n- waifu-diffusion\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png"", ""transformersInfo"": null, ""_id"": ""635ab9b93180c590f4f48db9"", ""modelId"": ""DGSpitzer/Cyberpunk-Anime-Diffusion"", ""usedStorage"": 24647194668}",0,,0,,0,,0,,0,"DGSpitzer/DGS-Diffusion-Space, IAmXenos21/stable-diffusion-webui-VORST2, Joeythemonster/Text-To-image-AllModels, John6666/Diffusion80XX4sg, John6666/PrintingPress4, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, anzorq/finetuned_diffusion, darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui, huggingface/InferenceSupport/discussions/new?title=DGSpitzer/Cyberpunk-Anime-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDGSpitzer%2FCyberpunk-Anime-Diffusion%5D(%2FDGSpitzer%2FCyberpunk-Anime-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld, vorstcavry/ai",15
|
DeepSeek-Coder-V2-Instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,286 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
deepseek-ai/DeepSeek-Coder-V2-Instruct,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: deepseek-license
|
| 5 |
+
license_link: LICENSE
|
| 6 |
+
base_model: deepseek-ai/DeepSeek-Coder-V2-Base
|
| 7 |
+
---
|
| 8 |
+
<!-- markdownlint-disable first-line-h1 -->
|
| 9 |
+
<!-- markdownlint-disable html -->
|
| 10 |
+
<!-- markdownlint-disable no-duplicate-header -->
|
| 11 |
+
|
| 12 |
+
<div align=""center"">
|
| 13 |
+
<img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"" width=""60%"" alt=""DeepSeek-V2"" />
|
| 14 |
+
</div>
|
| 15 |
+
<hr>
|
| 16 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 17 |
+
<a href=""https://www.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
|
| 18 |
+
<img alt=""Homepage"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true"" style=""display: inline-block; vertical-align: middle;""/>
|
| 19 |
+
</a>
|
| 20 |
+
<a href=""https://chat.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
|
| 21 |
+
<img alt=""Chat"" src=""https://img.shields.io/badge/🤖%20Chat-DeepSeek%20V2-536af5?color=536af5&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 22 |
+
</a>
|
| 23 |
+
<a href=""https://huggingface.co/deepseek-ai"" target=""_blank"" style=""margin: 2px;"">
|
| 24 |
+
<img alt=""Hugging Face"" src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 25 |
+
</a>
|
| 26 |
+
</div>
|
| 27 |
+
|
| 28 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 29 |
+
<a href=""https://discord.gg/Tc7c45Zzu5"" target=""_blank"" style=""margin: 2px;"">
|
| 30 |
+
<img alt=""Discord"" src=""https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da"" style=""display: inline-block; vertical-align: middle;""/>
|
| 31 |
+
</a>
|
| 32 |
+
<a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true"" target=""_blank"" style=""margin: 2px;"">
|
| 33 |
+
<img alt=""Wechat"" src=""https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 34 |
+
</a>
|
| 35 |
+
<a href=""https://twitter.com/deepseek_ai"" target=""_blank"" style=""margin: 2px;"">
|
| 36 |
+
<img alt=""Twitter Follow"" src=""https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
|
| 37 |
+
</a>
|
| 38 |
+
</div>
|
| 39 |
+
|
| 40 |
+
<div align=""center"" style=""line-height: 1;"">
|
| 41 |
+
<a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-CODE"" style=""margin: 2px;"">
|
| 42 |
+
<img alt=""Code License"" src=""https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
|
| 43 |
+
</a>
|
| 44 |
+
<a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"" style=""margin: 2px;"">
|
| 45 |
+
<img alt=""Model License"" src=""https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
|
| 46 |
+
</a>
|
| 47 |
+
</div>
|
| 48 |
+
<p align=""center"">
|
| 49 |
+
<a href=""#4-api-platform"">API Platform</a> |
|
| 50 |
+
<a href=""#5-how-to-run-locally"">How to Use</a> |
|
| 51 |
+
<a href=""#6-license"">License</a> |
|
| 52 |
+
</p>
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
<p align=""center"">
|
| 56 |
+
<a href=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/paper.pdf""><b>Paper Link</b>👁️</a>
|
| 57 |
+
</p>
|
| 58 |
+
|
| 59 |
+
# DeepSeek-Coder-V2: Breaking the Barrier of Closed-Source Models in Code Intelligence
|
| 60 |
+
|
| 61 |
+
## 1. Introduction
|
| 62 |
+
We present DeepSeek-Coder-V2, an open-source Mixture-of-Experts (MoE) code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks. Specifically, DeepSeek-Coder-V2 is further pre-trained from an intermediate checkpoint of DeepSeek-V2 with an additional 6 trillion tokens. Through this continued pre-training, DeepSeek-Coder-V2 substantially enhances the coding and mathematical reasoning capabilities of DeepSeek-V2, while maintaining comparable performance in general language tasks. Compared to DeepSeek-Coder-33B, DeepSeek-Coder-V2 demonstrates significant advancements in various aspects of code-related tasks, as well as reasoning and general capabilities. Additionally, DeepSeek-Coder-V2 expands its support for programming languages from 86 to 338, while extending the context length from 16K to 128K.
|
| 63 |
+
|
| 64 |
+
<p align=""center"">
|
| 65 |
+
<img width=""100%"" src=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/performance.png?raw=true"">
|
| 66 |
+
</p>
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
In standard benchmark evaluations, DeepSeek-Coder-V2 achieves superior performance compared to closed-source models such as GPT4-Turbo, Claude 3 Opus, and Gemini 1.5 Pro in coding and math benchmarks. The list of supported programming languages can be found [here](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/supported_langs.txt).
|
| 70 |
+
|
| 71 |
+
## 2. Model Downloads
|
| 72 |
+
|
| 73 |
+
We release DeepSeek-Coder-V2 with 16B and 236B total parameters, built on the [DeepSeekMoE](https://arxiv.org/pdf/2401.06066) framework, with active parameters of only 2.4B and 21B respectively; both base and instruct models are released to the public.
|
| 74 |
+
|
| 75 |
+
<div align=""center"">
|
| 76 |
+
|
| 77 |
+
| **Model** | **#Total Params** | **#Active Params** | **Context Length** | **Download** |
|
| 78 |
+
| :-----------------------------: | :---------------: | :----------------: | :----------------: | :----------------------------------------------------------: |
|
| 79 |
+
| DeepSeek-Coder-V2-Lite-Base | 16B | 2.4B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Base) |
|
| 80 |
+
| DeepSeek-Coder-V2-Lite-Instruct | 16B | 2.4B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) |
|
| 81 |
+
| DeepSeek-Coder-V2-Base | 236B | 21B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Base) |
|
| 82 |
+
| DeepSeek-Coder-V2-Instruct | 236B | 21B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct) |
|
| 83 |
+
|
| 84 |
+
</div>
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
## 3. Chat Website
|
| 88 |
+
|
| 89 |
+
You can chat with the DeepSeek-Coder-V2 on DeepSeek's official website: [coder.deepseek.com](https://coder.deepseek.com/sign_in)
|
| 90 |
+
|
| 91 |
+
## 4. API Platform
|
| 92 |
+
We also provide an OpenAI-compatible API on the DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/), where you can pay as you go at an unbeatable price.
|
| 93 |
+
<p align=""center"">
|
| 94 |
+
<img width=""40%"" src=""https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/model_price.jpg?raw=true"">
|
| 95 |
+
</p>
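As a rough illustration of how a call to the OpenAI-compatible endpoint might look (this sketch is not part of the original card; the base URL and the `deepseek-coder` model name are assumptions, so check the platform documentation for the current values):

```python
# Hedged sketch: querying the OpenAI-compatible DeepSeek API with the openai Python client.
# The base_url and model name below are assumptions; verify them on platform.deepseek.com.
from openai import OpenAI

client = OpenAI(api_key="YOUR_DEEPSEEK_API_KEY", base_url="https://api.deepseek.com")
response = client.chat.completions.create(
    model="deepseek-coder",
    messages=[{"role": "user", "content": "write a quick sort algorithm in python."}],
    max_tokens=512,
)
print(response.choices[0].message.content)
```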
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
## 5. How to run locally
|
| 99 |
+
**Here we provide some examples of how to use the DeepSeek-Coder-V2-Lite model. If you want to run the full DeepSeek-Coder-V2 in BF16 format for inference, 8x80GB GPUs are required.**
|
| 100 |
+
|
| 101 |
+
### Inference with Huggingface's Transformers
|
| 102 |
+
You can directly employ [Huggingface's Transformers](https://github.com/huggingface/transformers) for model inference.
|
| 103 |
+
|
| 104 |
+
#### Code Completion
|
| 105 |
+
```python
|
| 106 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 107 |
+
import torch
|
| 108 |
+
tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True)
|
| 109 |
+
model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
|
| 110 |
+
input_text = ""#write a quick sort algorithm""
|
| 111 |
+
inputs = tokenizer(input_text, return_tensors=""pt"").to(model.device)
|
| 112 |
+
outputs = model.generate(**inputs, max_length=128)
|
| 113 |
+
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
#### Code Insertion
|
| 117 |
+
```python
|
| 118 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 119 |
+
import torch
|
| 120 |
+
tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True)
|
| 121 |
+
model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Base"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
|
| 122 |
+
input_text = """"""<|fim▁begin|>def quick_sort(arr):
|
| 123 |
+
if len(arr) <= 1:
|
| 124 |
+
return arr
|
| 125 |
+
pivot = arr[0]
|
| 126 |
+
left = []
|
| 127 |
+
right = []
|
| 128 |
+
<|fim▁hole|>
|
| 129 |
+
if arr[i] < pivot:
|
| 130 |
+
left.append(arr[i])
|
| 131 |
+
else:
|
| 132 |
+
right.append(arr[i])
|
| 133 |
+
return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>""""""
|
| 134 |
+
inputs = tokenizer(input_text, return_tensors=""pt"").to(model.device)
|
| 135 |
+
outputs = model.generate(**inputs, max_length=128)
|
| 136 |
+
print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):])
|
| 137 |
+
```
|
| 138 |
+
|
| 139 |
+
#### Chat Completion
|
| 140 |
+
|
| 141 |
+
```python
|
| 142 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 143 |
+
import torch
|
| 144 |
+
tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", trust_remote_code=True)
|
| 145 |
+
model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
|
| 146 |
+
messages=[
|
| 147 |
+
{ 'role': 'user', 'content': ""write a quick sort algorithm in python.""}
|
| 148 |
+
]
|
| 149 |
+
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"").to(model.device)
|
| 150 |
+
# tokenizer.eos_token_id is the id of <|end▁of▁sentence|> token
|
| 151 |
+
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, top_k=50, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
|
| 152 |
+
print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
The complete chat template can be found within `tokenizer_config.json` located in the huggingface model repository.
|
| 158 |
+
|
| 159 |
+
An example of the chat template is shown below:
|
| 160 |
+
|
| 161 |
+
```bash
|
| 162 |
+
<|begin▁of▁sentence|>User: {user_message_1}
|
| 163 |
+
|
| 164 |
+
Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
|
| 165 |
+
|
| 166 |
+
Assistant:
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
You can also add an optional system message:
|
| 170 |
+
|
| 171 |
+
```bash
|
| 172 |
+
<|begin▁of▁sentence|>{system_message}
|
| 173 |
+
|
| 174 |
+
User: {user_message_1}
|
| 175 |
+
|
| 176 |
+
Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}
|
| 177 |
+
|
| 178 |
+
Assistant:
|
| 179 |
+
```
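To check the exact string the template produces, the tokenizer can render it directly; the sketch below is not part of the original card and simply uses `apply_chat_template` with `tokenize=False`, including an optional system message:

```python
# Render the chat template as plain text (no tokenization) to inspect the prompt format.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True
)
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},  # optional system message
    {"role": "user", "content": "write a quick sort algorithm in python."},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
```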
|
| 180 |
+
|
| 181 |
+
### Inference with vLLM (recommended)
|
| 182 |
+
To utilize [vLLM](https://github.com/vllm-project/vllm) for model inference, please merge this Pull Request into your vLLM codebase: https://github.com/vllm-project/vllm/pull/4650.
|
| 183 |
+
|
| 184 |
+
```python
|
| 185 |
+
from transformers import AutoTokenizer
|
| 186 |
+
from vllm import LLM, SamplingParams
|
| 187 |
+
|
| 188 |
+
max_model_len, tp_size = 8192, 1
|
| 189 |
+
model_name = ""deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct""
|
| 190 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 191 |
+
llm = LLM(model=model_name, tensor_parallel_size=tp_size, max_model_len=max_model_len, trust_remote_code=True, enforce_eager=True)
|
| 192 |
+
sampling_params = SamplingParams(temperature=0.3, max_tokens=256, stop_token_ids=[tokenizer.eos_token_id])
|
| 193 |
+
|
| 194 |
+
messages_list = [
|
| 195 |
+
[{""role"": ""user"", ""content"": ""Who are you?""}],
|
| 196 |
+
[{""role"": ""user"", ""content"": ""write a quick sort algorithm in python.""}],
|
| 197 |
+
[{""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++.""}],
|
| 198 |
+
]
|
| 199 |
+
|
| 200 |
+
prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]
|
| 201 |
+
|
| 202 |
+
outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)
|
| 203 |
+
|
| 204 |
+
generated_text = [output.outputs[0].text for output in outputs]
|
| 205 |
+
print(generated_text)
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
## 6. License
|
| 211 |
+
|
| 212 |
+
This code repository is licensed under [the MIT License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-CODE). The use of DeepSeek-Coder-V2 Base/Instruct models is subject to [the Model License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-MODEL). DeepSeek-Coder-V2 series (including Base and Instruct) supports commercial use.
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
## 7. Contact
|
| 216 |
+
If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
|
| 217 |
+
","{""id"": ""deepseek-ai/DeepSeek-Coder-V2-Instruct"", ""author"": ""deepseek-ai"", ""sha"": ""2453c79a2a0947968a054947b53daa598cb3be52"", ""last_modified"": ""2024-08-21 06:42:50+00:00"", ""created_at"": ""2024-06-14 03:46:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 19262, ""downloads_all_time"": null, ""likes"": 615, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_v2"", ""text-generation"", ""conversational"", ""custom_code"", ""arxiv:2401.06066"", ""base_model:deepseek-ai/DeepSeek-Coder-V2-Base"", ""base_model:finetune:deepseek-ai/DeepSeek-Coder-V2-Base"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Base\nlicense: other\nlicense_name: deepseek-license\nlicense_link: LICENSE"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeepseekV2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_deepseek.DeepseekV2Config"", ""AutoModel"": ""modeling_deepseek.DeepseekV2Model"", ""AutoModelForCausalLM"": ""modeling_deepseek.DeepseekV2ForCausalLM""}, ""model_type"": ""deepseek_v2"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-000055.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00039-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_deepseek_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""cstr/PDF-Summarizer"", ""aliceblue11/logo_o1-preview"", ""nbroad/HF-API-monitor"", ""ruslanmv/convert_to_gguf"", ""hamxa500/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""joshuaXX/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""evelyn-lo/evelyn"", ""Sunrusojsis/QuizBot"", ""abhillubillu/gameapp"", ""itsjakeo/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""zjasper666/bf16_vs_fp8"", ""HuggingFaceSupport/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""martinakaduc/melt"", ""testcs/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""SUHHHH/openLLMchatbot"", ""SG34/openLLMchatbot"", ""aliceblue11/openLLMchatbot111"", ""aliceblue11/openLLMchatbot222"", ""aliceblue11/LLMpromt111"", ""SUHHHH/LLMpromt"", ""SG34/LLMpromt"", ""SUHHHH/USEB-COPY"", ""aliceblue11/LLMpromt222"", ""SUHHHH/LLMpromt-test"", ""superrich001/LLMpromt"", ""aliceblue11/LLMpromt333"", ""basantakhakurel/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""SternLee04/trialSpace"", ""SternLee04/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""clay990/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""Brano892H9/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""whoamiii/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""cartr117/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""cnyt947/deepseek-ai-DeepSeek-Coder-V2-Instruct"", 
""mendoanjoe/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""tpdph/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""IdenGhost/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""baranbayden/coderv2"", ""alanerkenov317/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""msun415/Llamole"", ""Examon1/Examon4u"", ""K00B404/LLM_Quantization"", ""YoungRiggs/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""R-Shitei/theme-test"", ""memex-in/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""KuberMehta/PolyThink-Alpha"", ""rayaaas/deepseek-ai-DeepSeek-Coder-V2-Instruct"", ""davidizzle/LIA_CodiceDaVinci"", ""sam522/Coding_bot""], ""safetensors"": {""parameters"": {""BF16"": 235741434880}, ""total"": 235741434880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-21 06:42:50+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Base\nlicense: other\nlicense_name: deepseek-license\nlicense_link: LICENSE"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""666bbd0e0f82f95f338669c7"", ""modelId"": ""deepseek-ai/DeepSeek-Coder-V2-Instruct"", ""usedStorage"": 471486512925}",0,https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF,1,,0,"https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF, https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-i1-GGUF, https://huggingface.co/bartowski/DeepSeek-Coder-V2-Instruct-GGUF, https://huggingface.co/qwp4w3hyb/DeepSeek-Coder-V2-Instruct-iMat-GGUF, https://huggingface.co/nisten/deepseek-coder-v2-inst-cpu-optimized-gguf, https://huggingface.co/second-state/DeepSeek-Coder-V2-Instruct-GGUF, https://huggingface.co/gaianet/DeepSeek-Coder-V2-Instruct-GGUF, https://huggingface.co/GGorman/DeepSeek-Coder-V2-Instruct-Q4-mlx",8,,0,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, SG34/LLMpromt, YoungRiggs/deepseek-ai-DeepSeek-Coder-V2-Instruct, abhillubillu/gameapp, aliceblue11/LLMpromt111, aliceblue11/logo_o1-preview, bhaskartripathi/LLM_Quantization, cstr/PDF-Summarizer, evelyn-lo/evelyn, huggingface/InferenceSupport/discussions/914, martinakaduc/melt",13
|
| 218 |
+
mradermacher/DeepSeek-Coder-V2-Instruct-GGUF,"---
|
| 219 |
+
base_model: deepseek-ai/DeepSeek-Coder-V2-Instruct
|
| 220 |
+
language:
|
| 221 |
+
- en
|
| 222 |
+
library_name: transformers
|
| 223 |
+
license: other
|
| 224 |
+
license_link: LICENSE
|
| 225 |
+
license_name: deepseek-license
|
| 226 |
+
quantized_by: mradermacher
|
| 227 |
+
---
|
| 228 |
+
## About
|
| 229 |
+
|
| 230 |
+
<!-- ### quantize_version: 2 -->
|
| 231 |
+
<!-- ### output_tensor_quantised: 1 -->
|
| 232 |
+
<!-- ### convert_type: hf -->
|
| 233 |
+
<!-- ### vocab_type: -->
|
| 234 |
+
<!-- ### tags: -->
|
| 235 |
+
static quants of https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct
|
| 236 |
+
|
| 237 |
+
<!-- provided-files -->
|
| 238 |
+
weighted/imatrix quants are available at https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-i1-GGUF
|
| 239 |
+
## Usage
|
| 240 |
+
|
| 241 |
+
If you are unsure how to use GGUF files, refer to one of [TheBloke's
|
| 242 |
+
READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
|
| 243 |
+
more details, including on how to concatenate multi-part files.
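As a quick, hedged illustration (not from the original README; the Q4_K_S filename is only an example), the split parts can be reassembled into a single GGUF file like this:

```python
# Minimal sketch: concatenate the parts of a split GGUF file in order.
# The filename pattern is an example; adjust it to the quant you downloaded.
import glob
import shutil

parts = sorted(glob.glob("DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part*"))
with open("DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf", "wb") as merged:
    for part in parts:
        with open(part, "rb") as chunk:
            shutil.copyfileobj(chunk, merged)
```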
|
| 244 |
+
|
| 245 |
+
## Provided Quants
|
| 246 |
+
|
| 247 |
+
(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)
|
| 248 |
+
|
| 249 |
+
| Link | Type | Size/GB | Notes |
|
| 250 |
+
|:-----|:-----|--------:|:------|
|
| 251 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q2_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q2_K.gguf.part2of2) | Q2_K | 86.0 | |
|
| 252 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_XS.gguf.part2of2) | IQ3_XS | 96.4 | |
|
| 253 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part3of3) | IQ3_S | 101.8 | beats Q3_K* |
|
| 254 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part3of3) | Q3_K_S | 101.8 | |
|
| 255 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part3of3) | IQ3_M | 103.5 | |
|
| 256 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part3of3) | Q3_K_M | 112.8 | lower quality |
|
| 257 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part3of3) | Q3_K_L | 122.5 | |
|
| 258 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part3of3) | IQ4_XS | 126.9 | |
|
| 259 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part3of3) | Q4_K_S | 134.0 | fast, recommended |
|
| 260 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part3of3) | Q4_K_M | 142.6 | fast, recommended |
|
| 261 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part4of4) | Q5_K_S | 162.4 | |
|
| 262 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part4of4) | Q5_K_M | 167.3 | |
|
| 263 |
+
| [PART 1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part4of4) | Q6_K | 193.6 | very good quality |
|
| 264 |
+
| [P1](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part1of6) [P2](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part2of6) [P3](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part3of6) [P4](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part4of6) [P5](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part5of6) [P6](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part6of6) | Q8_0 | 250.7 | fast, best quality |
|
| 265 |
+
|
| 266 |
+
Here is a handy graph by ikawrakow comparing some lower-quality quant
|
| 267 |
+
types (lower is better):
|
| 268 |
+
|
| 269 |
+

|
| 270 |
+
|
| 271 |
+
And here are Artefact2's thoughts on the matter:
|
| 272 |
+
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
|
| 273 |
+
|
| 274 |
+
## FAQ / Model Request
|
| 275 |
+
|
| 276 |
+
See https://huggingface.co/mradermacher/model_requests for some answers to
|
| 277 |
+
questions you might have and/or if you want some other model quantized.
|
| 278 |
+
|
| 279 |
+
## Thanks
|
| 280 |
+
|
| 281 |
+
I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
|
| 282 |
+
me use its servers and providing upgrades to my workstation to enable
|
| 283 |
+
this work in my free time.
|
| 284 |
+
|
| 285 |
+
<!-- end -->
|
| 286 |
+
","{""id"": ""mradermacher/DeepSeek-Coder-V2-Instruct-GGUF"", ""author"": ""mradermacher"", ""sha"": ""f0f4de82f9fd727e6cb113ad6c04988bcfec00a7"", ""last_modified"": ""2024-06-21 11:53:13+00:00"", ""created_at"": ""2024-06-18 10:53:51+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 7, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""en"", ""base_model:deepseek-ai/DeepSeek-Coder-V2-Instruct"", ""base_model:finetune:deepseek-ai/DeepSeek-Coder-V2-Instruct"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek-license\nlicense_link: LICENSE\nquantized_by: mradermacher"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_XS.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ3_XS.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.IQ4_XS.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q2_K.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q2_K.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_L.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q3_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q4_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_M.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q5_K_S.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q6_K.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part1of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part2of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part3of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part4of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part5of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-Coder-V2-Instruct.Q8_0.gguf.part6of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-21 11:53:13+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-Coder-V2-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek-license\nlicense_link: LICENSE\nquantized_by: mradermacher"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6671673fce36cc6c625a8af4"", ""modelId"": 
""mradermacher/DeepSeek-Coder-V2-Instruct-GGUF"", ""usedStorage"": 1900813255872}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mradermacher/DeepSeek-Coder-V2-Instruct-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmradermacher%2FDeepSeek-Coder-V2-Instruct-GGUF%5D(%2Fmradermacher%2FDeepSeek-Coder-V2-Instruct-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
DucHaitenAIart_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,70 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
DucHaiten/DucHaitenAIart,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
tags:
|
| 6 |
+
- stable-diffusion
|
| 7 |
+
- text-to-image
|
| 8 |
+
- image-to-image
|
| 9 |
+
- diffusers
|
| 10 |
+
license: creativeml-openrail-m
|
| 11 |
+
inference: true
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
**Big update DucHaitenAIart_v3.1**
|
| 15 |
+
|
| 16 |
+
*Big update of DucHaitenAIart: v3.1 can handle more diverse and more detailed prompts, with gorgeous colors and more realistic shadows. The images have the feel of 3D anime, but the materials look much more realistic. The weak points are that some celebrity likenesses are no longer in the model, the style may be a bit too 3D-anime for some tastes, and teeth are rendered with somewhat less detail.
|
| 17 |
+
|
| 18 |
+
**Please support me by becoming a patron:**
|
| 19 |
+
|
| 20 |
+
https://www.patreon.com/duchaitenreal
|
| 21 |
+
|
| 22 |
+
*****
|
| 23 |
+
|
| 24 |
+
All sample images use only text-to-image generation: no editing, no image-to-image, no face restoration, no highres fix, no extras.
|
| 25 |
+
|
| 26 |
+
*****
|
| 27 |
+
|
| 28 |
+
Hello, sorry for my poor English.
|
| 29 |
+
|
| 30 |
+
After days of trying and retrying hundreds of times, across dozens of different versions, DucHaitenAIart has finally reached its official release.
|
| 31 |
+
|
| 32 |
+
Improved image sharpness, more realistic lighting correction, and more shooting angles; the only downside is that it's less flexible and less random than beta-v6.0, so I will still leave beta-v6.0 available for anyone to download.
|
| 33 |
+
|
| 34 |
+
This model can create NSFW images, but since it is not a hentai or porn model, anything really hardcore will be difficult to create. To make the model work better with NSFW images, add “hentai, porn, rule 34” to the prompt.
|
| 35 |
+
|
| 36 |
+
Always add to the prompt: “masterpiece, best quality, 1girl or 1boy, realistic, anime or cartoon (two different styles; I personally prefer anime), 3D, pixar, pin-up (only if you are going to give your character a sexy pose), highly detailed eyes, perfect eyes, both eyes are the same (if you don't want to draw eyes, don't add them), smooth, perfect face, hd, 2k, 4k, 8k, 16k”.
|
| 37 |
+
|
| 38 |
+
Add to the prompt: “extremely detailed 8K, high resolution, ultra quality” to further enhance the image quality, but it may weaken the AI's interest in other keywords.
|
| 39 |
+
|
| 40 |
+
You can add “glare, iridescent, global illumination, real hair movement, realistic light, realistic shadow” to the prompt to create better lighting effects, but the image may then become too realistic; if that is not what you want, please adjust the prompt accordingly.
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
*****
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
Sampler: DPM++ 2S a Karras
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
+ negative prompt:
|
| 50 |
+
illustration, painting, cartoons, sketch, (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, ((monochrome)), ((grayscale)), collapsed eyeshadow, multiple eyeblows, vaginas in breasts, (cropped), oversaturated, extra limb, missing limbs, deformed hands, long neck, long body, imperfect, (bad hands), signature, watermark, username, artist name, conjoined fingers, deformed fingers, ugly eyes, imperfect eyes, skewed eyes, unnatural face, unnatural body, error
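Since the repository also ships diffusers weights (`StableDiffusionPipeline` in `model_index.json`), a minimal loading sketch could look like the following; the prompt strings below are only examples built from the advice above, not an official recipe:

```python
# Hedged sketch: load the diffusers weights of this repo with StableDiffusionPipeline.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "DucHaiten/DucHaitenAIart", torch_dtype=torch.float16
).to("cuda")

prompt = ("masterpiece, best quality, 1girl, anime, 3D, pixar, "
          "highly detailed eyes, perfect eyes, smooth, perfect face, hd, 8k")
negative_prompt = ("(worst quality:2), (low quality:2), lowres, bad anatomy, bad hands, "
                   "watermark, signature, deformed fingers")

image = pipe(prompt, negative_prompt=negative_prompt,
             num_inference_steps=30, guidance_scale=7.0).images[0]
image.save("duchaiten_sample.png")
```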
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
*****
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
Some test images:
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+

|
| 60 |
+

|
| 61 |
+

|
| 62 |
+

|
| 63 |
+

|
| 64 |
+

|
| 65 |
+

|
| 66 |
+

|
| 67 |
+

|
| 68 |
+

|
| 69 |
+

|
| 70 |
+
","{""id"": ""DucHaiten/DucHaitenAIart"", ""author"": ""DucHaiten"", ""sha"": ""6610784a2ca1a19fc06479ce13853e4a7a3f2687"", ""last_modified"": ""2024-05-17 16:50:42+00:00"", ""created_at"": ""2022-12-28 10:37:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1148, ""downloads_all_time"": null, ""likes"": 179, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""stable-diffusion"", ""text-to-image"", ""image-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\ninference: true"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='AIart_v2.0-fp16-no-ema.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaiten-AIart-SDXL_v3.3.5.1.5.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart-v4.5.3.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v1.1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v1.1_emaonly.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v1.1a.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v1.2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v2.0.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v2.0.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DucHaitenAIart_v3.1.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Style-cartoon-kid-0.5.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='beta-v6.0.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""DucHaiten/webui"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""Yntec/DucHaiten-Webui-CPU"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""Bga/EDITSpacIO_v1_EXPAND"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""John6666/DucHaiten-Webui-CPU-Model-Merge-v2-test"", ""mindtube/maximum_multiplier_places"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""Bga/Diffusion80XXSHANTA"", ""musei/Diffusion80XXSHANTAPubl"", ""jordonpeter01/Diffusion70"", ""gsrathoreniks/web_ui"", ""ennov8ion/Landscapes-models"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""vih-v/x_mod"", ""soiz/DucHaiten-Webui-CPU2"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""safWayne/DucHaiten-DucHaitenAIart"", ""lekerdel/DucHaiten-DucHaitenAIar"", ""JDlion25/DucHaiten-DucHaitenAIart"", ""noykd/DucHaiten-DucHaitenAIart"", ""cloixai/DucHaitenAI"", ""Verien/DucHaiten-DucHaitenAIart"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""SMOOTHY1962/DucHaiten-DucHaitenAIart"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX"", ""flatindo/all-models"", ""flatindo/all-models-v1"", ""flatindo/img_All_models"", ""johann22/chat-diffusion-describe"", ""wideprism/Ultimate-Model-Collection"", ""GAIneZis/FantasyArt-Models"", ""TheMaisk/Einfach.ImageAI"", 
""vkatis/models_x6"", ""ennov8ion/picasso-diffusion"", ""vih-v/models_x"", ""K00B404/stablediffusion-portal"", ""ennov8ion/anime-new-models"", ""ennov8ion/anime-multi-new-models"", ""ennov8ion/photo-multi""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-17 16:50:42+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- image-to-image\n- diffusers\ninference: true"", ""transformersInfo"": null, ""_id"": ""63ac1c819e9b228c3095a676"", ""modelId"": ""DucHaiten/DucHaitenAIart"", ""usedStorage"": 239962562861}",0,,0,,0,,0,,0,"Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=DucHaiten/DucHaitenAIart&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDucHaiten%2FDucHaitenAIart%5D(%2FDucHaiten%2FDucHaitenAIart)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",13
|
EXAONE-Deep-32B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
FalconLite_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,77 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
amazon/FalconLite,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
inference: false
|
| 5 |
+
---
|
| 6 |
+
# FalconLite Model
|
| 7 |
+
|
| 8 |
+
FalconLite is a quantized version of the [Falcon 40B SFT OASST-TOP1 model](https://huggingface.co/OpenAssistant/falcon-40b-sft-top1-560), capable of processing long (i.e. 11K tokens) input sequences while consuming 4x less GPU memory. By utilizing 4-bit [GPTQ quantization](https://github.com/PanQiWei/AutoGPTQ) and adapted [dynamic NTK](https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/) RotaryEmbedding, FalconLite achieves a balance between latency, accuracy, and memory efficiency. With the ability to process 5x longer contexts than the original model, FalconLite is useful for applications such as topic retrieval, summarization, and question-answering. FalconLite can be deployed on a single AWS `g5.12x` instance with [TGI 0.9.2](https://github.com/huggingface/text-generation-inference/tree/v0.9.2), making it suitable for applications that require high performance in resource-constrained environments.
|
| 9 |
+
|
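The card does not spell out the dynamic NTK adaptation it mentions. As a purely illustrative sketch (this follows the commonly cited dynamic-NTK scaling rule for rotary embeddings, not necessarily FalconLite's exact modification of the TGI layers), the RoPE base can be enlarged once the running sequence length exceeds the length the model was trained on:

```python
import torch

def dynamic_ntk_inv_freq(head_dim: int, seq_len: int,
                         base: float = 10000.0,
                         max_trained_len: int = 2048,
                         scaling_factor: float = 1.0) -> torch.Tensor:
    # Illustrative dynamic-NTK rule: grow the RoPE base with the current
    # sequence length so long contexts stay within the trained frequency range.
    if seq_len > max_trained_len:
        base = base * (
            (scaling_factor * seq_len / max_trained_len) - (scaling_factor - 1)
        ) ** (head_dim / (head_dim - 2))
    return 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))

# Frequencies for an 11K-token context vs. a 2K-token context (head_dim of 64 assumed)
print(dynamic_ntk_inv_freq(64, 11000)[:4])
print(dynamic_ntk_inv_freq(64, 2048)[:4])
```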
| 10 |
+
## *New!* FalconLite2 Model ##
|
| 11 |
+
For the updated FalconLite2 model, please refer to [FalconLite2](https://huggingface.co/amazon/FalconLite2).
|
| 12 |
+
|
| 13 |
+
## Model Details
|
| 14 |
+
|
| 15 |
+
- **Developed by:** [AWS Contributors](https://github.com/orgs/aws-samples/teams/aws-prototype-ml-apac)
|
| 16 |
+
- **Model type:** [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b)
|
| 17 |
+
- **Language:** English
|
| 18 |
+
- **Quantized from weights:** [Falcon 40B SFT OASST-TOP1 model](https://huggingface.co/OpenAssistant/falcon-40b-sft-top1-560)
|
| 19 |
+
- **Modified from layers:** [Text-Generation-Inference 0.9.2](https://github.com/huggingface/text-generation-inference/tree/v0.9.2)
|
| 20 |
+
- **License:** Apache 2.0
|
| 21 |
+
- **Contact:** [GitHub issues](https://github.com/awslabs/extending-the-context-length-of-open-source-llms/issues)
|
| 22 |
+
- **Blogpost:** [Extend the context length of Falcon40B to 10k](https://medium.com/@chenwuperth/extend-the-context-length-of-falcon40b-to-10k-85d81d32146f)
|
| 23 |
+
|
| 24 |
+
## Deploy FalconLite ##
|
| 25 |
+
SSH login to an AWS `g5.12x` instance with the [Deep Learning AMI](https://aws.amazon.com/releasenotes/aws-deep-learning-ami-gpu-pytorch-2-0-ubuntu-20-04/).
|
| 26 |
+
|
| 27 |
+
### Start LLM server
|
| 28 |
+
```bash
|
| 29 |
+
git clone https://github.com/awslabs/extending-the-context-length-of-open-source-llms.git falconlite-dev
|
| 30 |
+
cd falconlite-dev/script
|
| 31 |
+
./docker_build.sh
|
| 32 |
+
./start_falconlite.sh
|
| 33 |
+
```
|
| 34 |
+
### Perform inference
|
| 35 |
+
```bash
|
| 36 |
+
# after FalconLite has been completely started
|
| 37 |
+
pip install -r requirements-client.txt
|
| 38 |
+
python falconlite_client.py
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### *New!* Amazon SageMaker Deployment ###
|
| 42 |
+
To deploy FalconLite on SageMaker endpoint, please follow [this notebook](https://github.com/awslabs/extending-the-context-length-of-open-source-llms/blob/main/custom-tgi-ecr/deploy.ipynb).
|
| 43 |
+
|
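The linked notebook is the authoritative recipe; the snippet below is only a rough sketch of what such a deployment with a custom TGI container usually looks like using the SageMaker Python SDK. The ECR image URI, environment variable names and endpoint name are placeholders, not values taken from this card.

```python
import sagemaker
from sagemaker.model import Model

session = sagemaker.Session()
role = sagemaker.get_execution_role()

# Placeholder: a custom TGI 0.9.2 image built from the linked repo and pushed to ECR
image_uri = '<account-id>.dkr.ecr.<region>.amazonaws.com/falconlite-tgi:0.9.2'

model = Model(
    image_uri=image_uri,
    role=role,
    env={
        # Assumed container configuration; see the linked notebook for the real settings
        'MODEL_ID': 'amazon/FalconLite',
        'MAX_INPUT_LENGTH': '11000',
        'MAX_TOTAL_TOKENS': '12000',
    },
    sagemaker_session=session,
)

predictor = model.deploy(
    initial_instance_count=1,
    instance_type='ml.g5.12xlarge',       # matches the g5.12x guidance above
    endpoint_name='falconlite-endpoint',  # placeholder name
)
```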
| 44 |
+
**Important** - When using FalconLite for inference for the first time, it may require a brief 'warm-up' period that can take tens of seconds. Subsequent inferences are faster and return results more promptly. This warm-up is normal and does not affect the overall performance of the system once the initialisation period has completed.
|
| 45 |
+
|
| 46 |
+
## Evaluation Results ##
|
| 47 |
+
We evaluated FalconLite against benchmarks that are specifically designed to assess the capabilities of LLMs in handling longer contexts. All evaluations were conducted without fine-tuning the model.
|
| 48 |
+
|
| 49 |
+
### Accuracy ###
|
| 50 |
+
|Eval task|Input length| Input length | Input length| Input length|
|
| 51 |
+
|----------|-------------|-------------|------------|-----------|
|
| 52 |
+
| | 2800 ~ 3800| 5500 ~ 5600 |7500 ~ 8300 | 9300 ~ 11000 |
|
| 53 |
+
| [Topic Retrieval](https://lmsys.org/blog/2023-06-29-longchat/) | 100% | 100% | 92% | 92% |
|
| 54 |
+
| [Line Retrieval](https://lmsys.org/blog/2023-06-29-longchat/#longeval-results) | 38% | 12% | 8% | 4% |
|
| 55 |
+
| [Pass key Retrieval](https://github.com/epfml/landmark-attention/blob/main/llama/run_test.py#L101) | 100% | 100% | 100% | 100% |
|
| 56 |
+
|
| 57 |
+
|Eval task| Test set Accuracy | Hard subset Accuracy|
|
| 58 |
+
|----------|-------------|-------------|
|
| 59 |
+
| [Question Answering with Long Input Texts](https://nyu-mll.github.io/quality/) | 46.9% | 40.8% |
|
| 60 |
+
|
| 61 |
+
### Performance ###
|
| 62 |
+
**Metric** = the average number of generated tokens per second (TPS):
|
| 63 |
+
|
| 64 |
+
TPS = `nb-generated-tokens` / `end-to-end-response-time`
|
| 65 |
+
|
| 66 |
+
where `end-to-end-response-time` is measured from the moment the inference request is received to the moment the last token is generated.
|
| 67 |
+
|
| 68 |
+
|Instance| Input length | Input length| Input length|Input length|
|
| 69 |
+
|----------|-------------|-------------|------------|------------|
|
| 70 |
+
| | 20 | 3300 | 5500 |10000 |
|
| 71 |
+
| g5.48x | 22 tps | 12 tps | 12 tps | 12 tps |
|
| 72 |
+
| g5.12x | 18 tps | 11 tps | 11 tps | 10 tps |
|
| 73 |
+
|
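As a small worked example of the metric defined above (not code from the FalconLite repository), TPS is simply the generated token count divided by the end-to-end response time:

```python
def tokens_per_second(n_generated_tokens: int,
                      request_received_at: float,
                      last_token_at: float) -> float:
    # TPS = generated tokens / end-to-end response time (seconds)
    return n_generated_tokens / (last_token_at - request_received_at)

# 240 tokens generated over a 20-second end-to-end response ~ 12 TPS,
# in line with the long-input rows of the table above.
print(tokens_per_second(240, 0.0, 20.0))  # -> 12.0
```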
| 74 |
+
## Limitations ##
|
| 75 |
+
* Our evaluation shows that FalconLite's capability in `Line Retrieval` is limited, and requires further effort.
|
| 76 |
+
* While `g5.12x` is sufficient for FalconLite to handle 10K-token contexts, a larger instance with more memory capacity, such as `g5.48x`, is recommended for sustained, heavy workloads.
|
| 77 |
+
* Before using the FalconLite model, it is important to perform your own independent assessment, and take measures to ensure that your use would comply with your own specific quality control practices and standards, and that your use would comply with the local rules, laws, regulations, licenses and terms that apply to you, and your content.","{""id"": ""amazon/FalconLite"", ""author"": ""amazon"", ""sha"": ""8bb62932f0ab8902341e9d1a579d3f2a7f4a2778"", ""last_modified"": ""2023-11-17 11:00:22+00:00"", ""created_at"": ""2023-08-01 14:18:59+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 175, ""downloads_all_time"": null, ""likes"": 169, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""RefinedWeb"", ""text-generation"", ""custom_code"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""RWForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_RW.RWConfig"", ""AutoModel"": ""modelling_RW.RWModel"", ""AutoModelForCausalLM"": ""modelling_RW.RWForCausalLM"", ""AutoModelForQuestionAnswering"": ""modelling_RW.RWForQuestionAnswering"", ""AutoModelForSequenceClassification"": ""modelling_RW.RWForSequenceClassification"", ""AutoModelForTokenClassification"": ""modelling_RW.RWForTokenClassification""}, ""model_type"": ""RefinedWeb"", ""tokenizer_config"": {""eos_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modelling_RW.RWForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_RW.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gptq_model-4bit-128g.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modelling_RW.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantize_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-17 11:00:22+00:00"", ""cardData"": ""license: apache-2.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modelling_RW.RWForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64c9145329d2f65419dbf2cc"", ""modelId"": ""amazon/FalconLite"", ""usedStorage"": 
22271097816}",0,,0,,0,https://huggingface.co/PrunaAI/amazon-FalconLite-GGUF-smashed,1,,0,huggingface/InferenceSupport/discussions/new?title=amazon/FalconLite&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bamazon%2FFalconLite%5D(%2Famazon%2FFalconLite)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Flux-uncensored_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,116 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
enhanceaiteam/Flux-uncensored,"---
|
| 3 |
+
tags:
|
| 4 |
+
- text-to-image
|
| 5 |
+
- stable-diffusion
|
| 6 |
+
- lora
|
| 7 |
+
- diffusers
|
| 8 |
+
- fluxpipeline
|
| 9 |
+
- flux
|
| 10 |
+
- not-for-all-audiences
|
| 11 |
+
base_model: black-forest-labs/FLUX.1-dev
|
| 12 |
+
license: creativeml-openrail-m
|
| 13 |
+
pipeline_tag: text-to-image
|
| 14 |
+
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
# FLUX Uncensored LoRA
|
| 18 |
+
|
| 19 |
+
<div align=""center"">
|
| 20 |
+
<img src=""banner.webp"" alt=""Banner Logo"" width=""800""/>
|
| 21 |
+
</div>
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
## Model Description
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
created by https://enhanceai.art
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
support discord server - https://discord.gg/cuCX9qur6f
|
| 31 |
+
|
| 32 |
+
The **FLUX Uncensored LoRA** is an enhancement designed for the base model `black-forest-labs/FLUX.1-dev`. It enables explicit, unrestricted generation of images using text prompts. The LoRA weights have been fine-tuned to remove the base model's content restrictions, allowing for the generation of NSFW (Not Safe For Work) and other uncensored content.
|
| 33 |
+
|
| 34 |
+
This LoRA extension can be loaded into the `FLUX.1-dev` pipeline using the `diffusers` library. It is optimized for high-quality, explicit image generation based on user-provided prompts. The model is intended for research and personal use, and adheres to the non-commercial license terms.
|
| 35 |
+
|
| 36 |
+
> **Warning:** This model allows the generation of explicit content. Users should exercise caution and adhere to legal and ethical guidelines.
|
| 37 |
+
|
| 38 |
+
# Donate & Support
|
| 39 |
+
|
| 40 |
+
## Why Support Us?
|
| 41 |
+
|
| 42 |
+
At **EnhanceAI**, we build powerful AI tools and models for creators and developers. Your support helps us continue innovating and improving the platform.
|
| 43 |
+
|
| 44 |
+
## How Your Donation Helps
|
| 45 |
+
|
| 46 |
+
- Enhance our AI tools and models.
|
| 47 |
+
- Keep the platform running smoothly.
|
| 48 |
+
- Provide you with new features and updates.
|
| 49 |
+
|
| 50 |
+
## Benefits of Donating:
|
| 51 |
+
|
| 52 |
+
- Exclusive access to premium tools.
|
| 53 |
+
- Early access to updates.
|
| 54 |
+
- Priority support.
|
| 55 |
+
|
| 56 |
+
[Donate Now](https://enhanceai.art/pricing)
|
| 57 |
+
|
| 58 |
+
Thank you for helping us grow and continue making AI accessible to all!
|
| 59 |
+
|
| 60 |
+
## License
|
| 61 |
+
|
| 62 |
+
This LoRA extension follows the **FLUX-1-dev Non-Commercial License**.
|
| 63 |
+
|
| 64 |
+
- **License Name:** flux-1-dev-non-commercial-license
|
| 65 |
+
- **License Link:** [https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)
|
| 66 |
+
|
| 67 |
+
## How to Use
|
| 68 |
+
|
| 69 |
+
Below is an example of how to use the FLUX Uncensored LoRA with the `diffusers` library:
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
from diffusers import AutoPipelineForText2Image
|
| 73 |
+
import torch
|
| 74 |
+
|
| 75 |
+
# Load the base model
|
| 76 |
+
pipeline = AutoPipelineForText2Image.from_pretrained(""black-forest-labs/FLUX.1-dev"", torch_dtype=torch.bfloat16).to('cuda')
|
| 77 |
+
|
| 78 |
+
# Load the uncensored LoRA weights
|
| 79 |
+
pipeline.load_lora_weights('enhanceaiteam/Flux-uncensored', weight_name='lora.safetensors')
|
| 80 |
+
|
| 81 |
+
# Generate an image with an uncensored NSFW prompt
|
| 82 |
+
image = pipeline('a naked cute girl').images[0]
|
| 83 |
+
image.show()
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
Check out more AI tools and models at https://enhanceai.art.
|
| 87 |
+
Visit https://enhanceai.art for more AI tools and image generation models!
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
## Trigger Words
|
| 91 |
+
|
| 92 |
+
Use the following trigger words to guide the model toward generating NSFW content:
|
| 93 |
+
|
| 94 |
+
- **nsfw**
|
| 95 |
+
- **naked**
|
| 96 |
+
- **pron**
|
| 97 |
+
- **kissing**
|
| 98 |
+
- **erotic**
|
| 99 |
+
- **nude**
|
| 100 |
+
- **sensual**
|
| 101 |
+
- **adult content**
|
| 102 |
+
- **explicit**
|
| 103 |
+
|
| 104 |
+
These keywords, along with descriptive prompts, help the model generate explicit imagery.
|
| 105 |
+
|
| 106 |
+
## Model Details
|
| 107 |
+
|
| 108 |
+
- **Base Model:** `black-forest-labs/FLUX.1-dev`
|
| 109 |
+
- **LoRA Weights:** `enhanceaiteam/Flux-uncensored`
|
| 110 |
+
- **LoRA Weight File:** `lora.safetensors`
|
| 111 |
+
- **Torch Data Type:** `torch.bfloat16`
|
| 112 |
+
- **Hardware Requirement:** CUDA-enabled GPU recommended for optimal performance.
|
| 113 |
+
|
| 114 |
+
## Disclaimer
|
| 115 |
+
|
| 116 |
+
This model is capable of generating uncensored and explicit content. It should be used responsibly and within the bounds of the law. The creators do not endorse illegal or unethical use of the model. Content generated using this model should comply with platform guidelines and local regulations regarding NSFW material.","{""id"": ""enhanceaiteam/Flux-uncensored"", ""author"": ""enhanceaiteam"", ""sha"": ""cddf8e566740be3b3a453afffbc1f62652bffa31"", ""last_modified"": ""2024-10-21 14:40:07+00:00"", ""created_at"": ""2024-09-03 04:53:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2104, ""downloads_all_time"": null, ""likes"": 369, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""stable-diffusion"", ""lora"", ""fluxpipeline"", ""flux"", ""not-for-all-audiences"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:adapter:black-forest-labs/FLUX.1-dev"", ""license:creativeml-openrail-m"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: creativeml-openrail-m\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- lora\n- diffusers\n- fluxpipeline\n- flux\n- not-for-all-audiences"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='banner.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lora.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Nymbo/Serverless-ImgGen-Hub"", ""Nymbo/Compare-6"", ""madhawavish/enhanceaiteam-Flux-uncensored"", ""fantaxy/playground25"", ""nevproject/Flux-uncensored"", ""Novaciano/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""runebloodstone/enhanceaiteam-Flux-uncensored"", ""huanhoang/flux2"", ""Deddy/FLUX-Wallpaper-HD-Maker"", ""Amphon072/enhanceaiteam-Flux-uncensored"", ""PeepDaSlan9/HYDRAS_flux2"", ""colbyford/flux2"", ""NativeAngels/Compare-6"", ""SamOdinson/enhanceaiteam-Flux-uncensored"", ""Nerox6x/enhanceaiteam-Flux-uncensored"", ""taufiqdp/FLUX"", ""K00B404/CleanFLUX.1-Schnell-Serverless"", ""huan2hoang3/flux2"", ""HuggingFaceSupport/enhanceaiteam-Flux-uncensored"", ""marcolino1980/enhanceaiteam-Flux-uncensored"", ""viknesh100/Flux-image-gen"", ""K00B404/Flux-uncensored-Custom"", ""RufioSwashbuckle/enhanceaiteam-Flux-uncensored"", ""jizzz/Flux-uncensored"", ""NerdyToast8/enhanceaiteam-Flux-uncensored"", ""superman112332/enhanceaiteam-Flux-uncensored32"", ""Deninski1/enhanceaiteam-Flux-uncensored"", ""K00B404/Funcensored"", ""Alibrown/Flux-uncensored-dev"", ""NativeAngels/Serverless-ImgGen-Hub"", ""theunseenones94/Flux_Lustly_AI_Uncensored_NSFW_V1"", ""Kanokpun/enhanceaiteam-Flux-uncensored"", ""rizoa/flux3"", ""Gh6st66/enhanceaiteam-Flux-uncensored"", ""Parmist/strangerzonehf-Flux-Super-Realism-LoRA"", ""K00B404/FLUX-Wallpaper-HD-Maker_p"", ""Nymbo/serverless-imggen-test"", ""huanhoang/enhanceaiteam-Flux-uncensored"", ""Mary-4/enhanceaiteam-Flux-uncensored"", ""huggingfacereusableemail/enhanceaiteam-Flux-uncensored"", ""Latads/enhanceaiteam-Flux-uncensored"", ""DAPANTELIS/enhanceaiteam-Flux-uncensored"", ""lou2191/enhanceaiteam-Flux-uncensored"", ""IvanAmador/enhanceaiteam-Flux-uncensored"", ""aexyb/SDXL"", 
""Clocky123/enhanceaiteam-Flux-uncensored"", ""vatistasdimitris/enhanceaiteam-Flux-uncensored"", ""llamazade/enhanceaiteam-Flux-uncensored"", ""affgg/enhanceaiteam-Flux-uncensored"", ""us3rshell/enhanceaiteam-Flux-uncensored"", ""superman112332/Flux-uncensored"", ""fhsp93/uncensored-FLUX"", ""MartsoBodziu1994/enhanceaiteam-Flux-uncensored"", ""brodekh/enhanceaiteam-Flux-uncensored"", ""Dutch666/enhanceaiteam-Flux-uncensored"", ""Yash7004/enhanceaiteam-Flux-uncensored"", ""likeableartist/enhanceaiteam-Flux-uncensored"", ""mehyar500/enhanceaiteam-Flux-uncensored"", ""AppRich/enhanceaiteam-Flux-uncensored"", ""bigdawg314/enhanceaiteam-Flux-uncensored"", ""qAidleX/enhanceaiteam-Flux-uncensored"", ""Bornhald/enhanceaiteam-Flux-uncensored"", ""MaxxLopior/enhanceaiteam-Flux-uncensored"", ""sigilandclover/enhanceaiteam-Flux-uncensored"", ""Samz222/enhanceaiteam-Flux-uncensored"", ""jonywick718doe/enhanceaiteam-Flux-uncensorey46437"", ""hansmdll/enhanceaiteam-Flux-uncensored"", ""Yeahhsjqhqjw/enhanceaiteam-Flux-uncensored"", ""enthrete/enhanceaiteam-Flux-uncensored"", ""pubomaxsm/pubogr"", ""troyweber23/enhanceaiteam-Flux-uncensored"", ""Stockmarket/enhanceaiteam-Flux-uncensored"", ""ragul06/enhanceaiteam-Flux-uncensored"", ""Hyleys/enhanceaiteam-Flux-uncensored"", ""TheThanos/enhanceaiteam-Flux-uncensored"", ""zikazama/enhanceaiteam-Flux-uncensored"", ""HermesTres1998/enhanceaiteam-Flux-uncensored"", ""MalucoCalibrado/enhanceaiteam-Flux-uncensored"", ""RayTitanic/enhanceaiteam-Flux-uncensored"", ""mktjpn2024/enhanceaiteam-Flux-uncensored"", ""FNuni/enhanceaiteam-Flux-uncensored"", ""bgswangz/enhanceaiteam-Flux-uncensored"", ""SilencTeachYouHowToSing/enhanceaiteam-Flux-uncensored"", ""Akbartus/FluxSchnell"", ""zdvrer/enhanceaiteam-Flux-uncensored"", ""doctumdoces/enhanceaiteam-Flux-uncensored"", ""ananthusajeev/enhanceaiteam-Flux-uncensored"", ""fdsdfsqFFDS/enhanceaiteam-Flux-uncensored"", ""drewlarrowood/enhanceaiteam-Flux-uncensored"", ""Taffy1984/enhanceaiteam-Flux-uncensored"", ""vdrouot/enhanceaiteam-Flux-uncensored"", ""K00B404/flux_666"", ""illestnoize/Compare-6"", ""Alibrown/FluxUC-MotionSynt"", ""sshsiiaoaoaoa/enhanceaiteam-Flux-uncensored"", ""Bigbrain7/enhanceaiteam-Flux-uncensored"", ""Ever2025/enhanceaiteam-Flux-uncensored"", ""K00B404/3Luik"", ""Travispixley/enhanceaiteam-Flux-uncensored"", ""Travispixley/enhanceaiteam-Flux-uncensored2""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-21 14:40:07+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlicense: creativeml-openrail-m\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- lora\n- diffusers\n- fluxpipeline\n- flux\n- not-for-all-audiences"", ""transformersInfo"": null, ""_id"": ""66d6964067e7960d053c4f62"", ""modelId"": ""enhanceaiteam/Flux-uncensored"", ""usedStorage"": 1374952176}",0,,0,,0,,0,,0,,0
|
Future-Diffusion_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,47 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
nitrosocke/Future-Diffusion,"---
|
| 3 |
+
license: openrail++
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- stable-diffusion
|
| 8 |
+
- text-to-image
|
| 9 |
+
- diffusers
|
| 10 |
+
thumbnail: ""https://huggingface.co/nitrosocke/Future-Diffusion/resolve/main/images/future-diffusion-thumbnail-2.jpg""
|
| 11 |
+
inference: false
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
### Future Diffusion
|
| 15 |
+
|
| 16 |
+
This is a fine-tuned Stable Diffusion 2.0 model trained on high-quality 3D images with a futuristic sci-fi theme.
|
| 17 |
+
Use the tokens
|
| 18 |
+
`future style`
|
| 19 |
+
in your prompts for the effect.
|
| 20 |
+
Trained on Stability.ai's [Stable Diffusion 2.0 Base](https://huggingface.co/stabilityai/stable-diffusion-2-base) with 512x512 resolution.
|
| 21 |
+
|
| 22 |
+
**If you enjoy my work and want to test new models before release, please consider supporting me**
|
| 23 |
+
[](https://patreon.com/user?u=79196446)
|
| 24 |
+
|
| 25 |
+
**Disclaimer: The SD 2.0 model is just over 24h old at this point and we still need to figure out how it works exactly. Please view this as an early prototype and experiment with the model.**
|
| 26 |
+
|
| 27 |
+
**Characters rendered with the model:**
|
| 28 |
+

|
| 29 |
+
**Cars and Animals rendered with the model:**
|
| 30 |
+

|
| 31 |
+
**Landscapes rendered with the model:**
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+
#### Prompt and settings for the Characters:
|
| 35 |
+
**future style [subject] Negative Prompt: duplicate heads bad anatomy**
|
| 36 |
+
_Steps: 20, Sampler: Euler a, CFG scale: 7, Size: 512x704_
|
| 37 |
+
|
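The card itself ships no code snippet, so here is a minimal, hedged `diffusers` sketch that mirrors the character settings listed above (Euler a sampler, 20 steps, CFG 7, 512x704); the example prompt and the choice of EulerAncestralDiscreteScheduler as the Euler a equivalent are assumptions, not part of the original card:

```python
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    'nitrosocke/Future-Diffusion', torch_dtype=torch.float16
).to('cuda')
# Euler a sampler, matching the settings listed above
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    'future style portrait of a cyborg explorer',   # include the trigger tokens
    negative_prompt='duplicate heads bad anatomy',
    num_inference_steps=20,
    guidance_scale=7,
    width=512,
    height=704,
).images[0]
image.save('future_style_character.png')
```

For the landscape settings below, swap in the landscape prompt and a 1024x576 size.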
| 38 |
+
#### Prompt and settings for the Landscapes:
|
| 39 |
+
**future style city market street level at night Negative Prompt: blurry fog soft**
|
| 40 |
+
_Steps: 20, Sampler: Euler a, CFG scale: 7, Size: 1024x576_
|
| 41 |
+
|
| 42 |
+
This model was trained using the diffusers-based DreamBooth training by ShivamShrirao, with prior-preservation loss and the _train-text-encoder_ flag, for 7,000 steps.
|
| 43 |
+
|
| 44 |
+
## License
|
| 45 |
+
|
| 46 |
+
This model is open access and available to all, with a CreativeML Open RAIL++-M License further specifying rights and usage.
|
| 47 |
+
[Please read the full license here](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL)","{""id"": ""nitrosocke/Future-Diffusion"", ""author"": ""nitrosocke"", ""sha"": ""dd9e03cd81a14d8b23db10ecd66ac22c4e5bd064"", ""last_modified"": ""2023-03-09 07:23:11+00:00"", ""created_at"": ""2022-11-24 23:43:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 202, ""downloads_all_time"": null, ""likes"": 400, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""en"", ""license:openrail++"", ""autotrain_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: openrail++\ntags:\n- stable-diffusion\n- text-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Future-Diffusion/resolve/main/images/future-diffusion-thumbnail-2.jpg\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='future-diffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='future-diffusion-v1.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/future-diffusion-samples01s.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/future-diffusion-samples02s.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/future-diffusion-samples03s.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/future-diffusion-thumbnail-2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", 
""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""IgorSense/Diffusion_Space2"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""ennov8ion/FantasyArt-Models"", ""ennov8ion/dreamlike-models"", ""noes14155/img_All_models"", ""Dagfinn1962/prodia2"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""pikto/Diffuser"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""Blane187/multi-diffusion"", ""NativeAngels/ToyWorld"", ""mindtube/maximum_multiplier_places"", ""pikto/prodia"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""bradarrML/Diffusion_Space"", ""Mileena/Diffusion_Space2-Styles"", ""ennov8ion/Landscapes-models"", ""Shad0ws/ImageModelTestEnvironment"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""richds/Diffusion_Space"", ""IgorSense/Diffusion_Space"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""Yntec/top_100_diffusion"", ""AIlexDev/Diffusion60XX"", ""jptv/Stable-diffusion"", ""flatindo/img_All_models"", ""flatindo/all-models"", ""flatindo/all-models-v1"", ""johann22/chat-diffusion-describe"", ""wideprism/Ultimate-Model-Collection"", ""GAIneZis/FantasyArt-Models"", ""TheMaisk/Einfach.ImageAI"", ""vkatis/models_x6"", ""ennov8ion/picasso-diffusion"", ""K00B404/stablediffusion-portal"", ""vih-v/models_x"", ""ennov8ion/anime-new-models"", ""ennov8ion/anime-multi-new-models"", ""ennov8ion/photo-multi"", ""ennov8ion/anime-multi"", ""ElenaVasileva/Magic_Levitan_v1_4_Stanislav"", ""StanislavMichalov/Magic_Levitan_v1_4_Stanislav"", ""vih-v/models_d2"", ""StiveDudov/Magic_Levitan_v1_4_Stanislav"", ""Ashrafb/comicbook-models"", ""sohoso/architecture""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-03-09 07:23:11+00:00"", ""cardData"": ""language:\n- en\nlicense: openrail++\ntags:\n- stable-diffusion\n- text-to-image\n- diffusers\nthumbnail: https://huggingface.co/nitrosocke/Future-Diffusion/resolve/main/images/future-diffusion-thumbnail-2.jpg\ninference: false"", ""transformersInfo"": null, ""_id"": ""638001b09b75fb485a54c0a5"", ""modelId"": ""nitrosocke/Future-Diffusion"", ""usedStorage"": 18068938846}",0,,0,,0,,0,,0,"Alfasign/dIFFU, Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, 
huggingface/InferenceSupport/discussions/new?title=nitrosocke/Future-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnitrosocke%2FFuture-Diffusion%5D(%2Fnitrosocke%2FFuture-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",13
|
Hyper-SD_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,517 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ByteDance/Hyper-SD,"---
|
| 3 |
+
library_name: diffusers
|
| 4 |
+
inference: false
|
| 5 |
+
tags:
|
| 6 |
+
- lora
|
| 7 |
+
- text-to-image
|
| 8 |
+
- stable-diffusion
|
| 9 |
+
- flux
|
| 10 |
+
base_model: black-forest-labs/FLUX.1-dev
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# Hyper-SD
|
| 14 |
+
Official Repository of the paper: *[Hyper-SD](https://arxiv.org/abs/2404.13686)*.
|
| 15 |
+
|
| 16 |
+
Project Page: https://hyper-sd.github.io/
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
## News🔥🔥🔥
|
| 22 |
+
|
| 23 |
+
* Aug.26, 2024. 💥💥💥 Our 8-step and 16-step **FLUX.1-dev-related LoRAs** are available now! We recommend a LoRA scale of around 0.125 (which is adaptive with training); the guidance scale can be kept at 3.5. Lower-step LoRAs are coming soon. 💥💥💥
|
| 24 |
+
* Aug.19, 2024. SD3-related CFG LoRAs are available now! We recommend setting the guidance scale to 3.0/5.0/7.0 at 4/8/16 steps. Don't forget to fuse the LoRA with a relatively small scale (e.g. 0.125, which is adaptive with training) before running inference with diffusers. Note that the 8-step and 16-step LoRAs can also run at slightly fewer steps, such as 6 and 12 steps respectively. We hope to hear your feedback; FLUX-related models will be coming next week.
|
| 25 |
+
* May.13, 2024. The 12-step CFG-preserved [Hyper-SDXL-12steps-CFG-LoRA](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-12steps-CFG-lora.safetensors) and [Hyper-SD15-12steps-CFG-LoRA](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SD15-12steps-CFG-lora.safetensors) are also available now (supporting guidance scales of 5~8); they can be more practical, offering a better trade-off between performance and speed. Enjoy!
|
| 26 |
+
* Apr.30, 2024. Our 8-step CFG-preserved [Hyper-SDXL-8steps-CFG-LoRA](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors) and [Hyper-SD15-8steps-CFG-LoRA](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SD15-8steps-CFG-lora.safetensors) are available now (supporting guidance scales of 5~8); we strongly recommend making the 8-step CFG LoRA a standard configuration for all SDXL and SD1.5 models!!!
|
| 27 |
+
* Apr.28, 2024. ComfyUI workflows for the 1-step Unified LoRA 🥰 with TCDScheduler, which support inference at different step counts, are [released](https://huggingface.co/ByteDance/Hyper-SD/tree/main/comfyui)! Remember to install ⭕️ [ComfyUI-TCD](https://github.com/JettHu/ComfyUI-TCD) in your `ComfyUI/custom_nodes` folder!!! You're encouraged to adjust the eta parameter to get better results 🌟!
|
| 28 |
+
* Apr.26, 2024. Thanks to @[Pete](https://huggingface.co/pngwn) for contributing a larger canvas to our [scribble demo](https://huggingface.co/spaces/ByteDance/Hyper-SD15-Scribble) 👏.
|
| 29 |
+
* Apr.24, 2024. The ComfyUI [workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SDXL-1step-Unet-workflow.json) and [checkpoint](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors) for the 1-step SDXL UNet ✨ are also available! Don't forget ⭕️ to install the custom [scheduler](https://huggingface.co/ByteDance/Hyper-SD/tree/main/comfyui/ComfyUI-HyperSDXL1StepUnetScheduler) in your `ComfyUI/custom_nodes` folder!!!
|
| 30 |
+
* Apr.23, 2024. ComfyUI workflows on N-Steps LoRAs are [released](https://huggingface.co/ByteDance/Hyper-SD/tree/main/comfyui)! Worth a try for creators 💥!
|
| 31 |
+
* Apr.23, 2024. Our technical report 📚 is uploaded to [arXiv](https://arxiv.org/abs/2404.13686)! Many implementation details are provided and we welcome more discussions👏.
|
| 32 |
+
* Apr.21, 2024. Hyper-SD ⚡️ is highly compatible and works well with different base models and ControlNets. To clarify, we also append a ControlNet usage example [here](https://huggingface.co/ByteDance/Hyper-SD#controlnet-usage).
|
| 33 |
+
* Apr.20, 2024. Our checkpoints and two demos 🤗 (i.e. [SD15-Scribble](https://huggingface.co/spaces/ByteDance/Hyper-SD15-Scribble) and [SDXL-T2I](https://huggingface.co/spaces/ByteDance/Hyper-SDXL-1Step-T2I)) are publicly available on [HuggingFace Repo](https://huggingface.co/ByteDance/Hyper-SD).
|
| 34 |
+
|
| 35 |
+
## Try our Hugging Face demos:
|
| 36 |
+
Hyper-SD Scribble demo host on [🤗 scribble](https://huggingface.co/spaces/ByteDance/Hyper-SD15-Scribble)
|
| 37 |
+
|
| 38 |
+
Hyper-SDXL One-step Text-to-Image demo host on [🤗 T2I](https://huggingface.co/spaces/ByteDance/Hyper-SDXL-1Step-T2I)
|
| 39 |
+
|
| 40 |
+
## Introduction
|
| 41 |
+
|
| 42 |
+
Hyper-SD is one of the new State-of-the-Art diffusion model acceleration techniques.
|
| 43 |
+
In this repository, we release the models distilled from [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev), [SD3-Medium](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), [SDXL Base 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and [Stable-Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5).
|
| 44 |
+
|
| 45 |
+
## Checkpoints
|
| 46 |
+
|
| 47 |
+
* `Hyper-FLUX.1-dev-Nsteps-lora.safetensors`: Lora checkpoint, for FLUX.1-dev-related models.
|
| 48 |
+
* `Hyper-SD3-Nsteps-CFG-lora.safetensors`: Lora checkpoint, for SD3-related models.
|
| 49 |
+
* `Hyper-SDXL-Nstep-lora.safetensors`: Lora checkpoint, for SDXL-related models.
|
| 50 |
+
* `Hyper-SD15-Nstep-lora.safetensors`: Lora checkpoint, for SD1.5-related models.
|
| 51 |
+
* `Hyper-SDXL-1step-unet.safetensors`: Unet checkpoint distilled from SDXL-Base.
|
| 52 |
+
|
| 53 |
+
## Text-to-Image Usage
|
| 54 |
+
|
| 55 |
+
### FLUX.1-dev-related models
|
| 56 |
+
```python
|
| 57 |
+
import torch
|
| 58 |
+
from diffusers import FluxPipeline
|
| 59 |
+
from huggingface_hub import hf_hub_download
|
| 60 |
+
base_model_id = ""black-forest-labs/FLUX.1-dev""
|
| 61 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 62 |
+
# Take 8-steps lora as an example
|
| 63 |
+
ckpt_name = ""Hyper-FLUX.1-dev-8steps-lora.safetensors""
|
| 64 |
+
# Load model, please fill in your access tokens since FLUX.1-dev repo is a gated model.
|
| 65 |
+
pipe = FluxPipeline.from_pretrained(base_model_id, token=""xxx"")
|
| 66 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 67 |
+
pipe.fuse_lora(lora_scale=0.125)
|
| 68 |
+
pipe.to(""cuda"", dtype=torch.float16)
|
| 69 |
+
image=pipe(prompt=""a photo of a cat"", num_inference_steps=8, guidance_scale=3.5).images[0]
|
| 70 |
+
image.save(""output.png"")
|
| 71 |
+
```
|
| 72 |
+
|
| 73 |
+
### SD3-related models
|
| 74 |
+
```python
|
| 75 |
+
import torch
|
| 76 |
+
from diffusers import StableDiffusion3Pipeline
|
| 77 |
+
from huggingface_hub import hf_hub_download
|
| 78 |
+
base_model_id = ""stabilityai/stable-diffusion-3-medium-diffusers""
|
| 79 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 80 |
+
# Take 8-steps lora as an example
|
| 81 |
+
ckpt_name = ""Hyper-SD3-8steps-CFG-lora.safetensors""
|
| 82 |
+
# Load model, please fill in your access tokens since SD3 repo is a gated model.
|
| 83 |
+
pipe = StableDiffusion3Pipeline.from_pretrained(base_model_id, token=""xxx"")
|
| 84 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 85 |
+
pipe.fuse_lora(lora_scale=0.125)
|
| 86 |
+
pipe.to(""cuda"", dtype=torch.float16)
|
| 87 |
+
image=pipe(prompt=""a photo of a cat"", num_inference_steps=8, guidance_scale=5.0).images[0]
|
| 88 |
+
image.save(""output.png"")
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
### SDXL-related models
|
| 92 |
+
#### 2-Steps, 4-Steps, 8-steps LoRA
|
| 93 |
+
Take the 2-step LoRA as an example; you can also use other LoRAs with the corresponding inference-step settings.
|
| 94 |
+
```python
|
| 95 |
+
import torch
|
| 96 |
+
from diffusers import DiffusionPipeline, DDIMScheduler
|
| 97 |
+
from huggingface_hub import hf_hub_download
|
| 98 |
+
base_model_id = ""stabilityai/stable-diffusion-xl-base-1.0""
|
| 99 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 100 |
+
# Take 2-steps lora as an example
|
| 101 |
+
ckpt_name = ""Hyper-SDXL-2steps-lora.safetensors""
|
| 102 |
+
# Load model.
|
| 103 |
+
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant=""fp16"").to(""cuda"")
|
| 104 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 105 |
+
pipe.fuse_lora()
|
| 106 |
+
# Ensure ddim scheduler timestep spacing set as trailing !!!
|
| 107 |
+
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing=""trailing"")
|
| 108 |
+
# lower eta results in more detail
|
| 109 |
+
prompt=""a photo of a cat""
|
| 110 |
+
image=pipe(prompt=prompt, num_inference_steps=2, guidance_scale=0).images[0]
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
#### Unified LoRA (support 1 to 8 steps inference)
|
| 114 |
+
You can flexibly adjust the number of inference steps and eta value to achieve best performance.
|
| 115 |
+
```python
|
| 116 |
+
import torch
|
| 117 |
+
from diffusers import DiffusionPipeline, TCDScheduler
|
| 118 |
+
from huggingface_hub import hf_hub_download
|
| 119 |
+
base_model_id = ""stabilityai/stable-diffusion-xl-base-1.0""
|
| 120 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 121 |
+
ckpt_name = ""Hyper-SDXL-1step-lora.safetensors""
|
| 122 |
+
# Load model.
|
| 123 |
+
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant=""fp16"").to(""cuda"")
|
| 124 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 125 |
+
pipe.fuse_lora()
|
| 126 |
+
# Use TCD scheduler to achieve better image quality
|
| 127 |
+
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
| 128 |
+
# Lower eta results in more detail for multi-steps inference
|
| 129 |
+
eta=1.0
|
| 130 |
+
prompt=""a photo of a cat""
|
| 131 |
+
image=pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0, eta=eta).images[0]
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
#### 1-step SDXL Unet
|
| 135 |
+
Only for single-step inference.
|
| 136 |
+
```python
|
| 137 |
+
import torch
|
| 138 |
+
from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
|
| 139 |
+
from huggingface_hub import hf_hub_download
|
| 140 |
+
from safetensors.torch import load_file
|
| 141 |
+
base_model_id = ""stabilityai/stable-diffusion-xl-base-1.0""
|
| 142 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 143 |
+
ckpt_name = ""Hyper-SDXL-1step-Unet.safetensors""
|
| 144 |
+
# Load model.
|
| 145 |
+
unet = UNet2DConditionModel.from_config(base_model_id, subfolder=""unet"").to(""cuda"", torch.float16)
|
| 146 |
+
unet.load_state_dict(load_file(hf_hub_download(repo_name, ckpt_name), device=""cuda""))
|
| 147 |
+
pipe = DiffusionPipeline.from_pretrained(base_model_id, unet=unet, torch_dtype=torch.float16, variant=""fp16"").to(""cuda"")
|
| 148 |
+
# Use LCM scheduler instead of ddim scheduler to support specific timestep number inputs
|
| 149 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 150 |
+
# Set start timesteps to 800 in the one-step inference to get better results
|
| 151 |
+
prompt=""a photo of a cat""
|
| 152 |
+
image=pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0, timesteps=[800]).images[0]
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
### SD1.5-related models
|
| 157 |
+
|
| 158 |
+
#### 2-Steps, 4-Steps, 8-steps LoRA
|
| 159 |
+
Take the 2-step LoRA as an example; you can also use other LoRAs with the corresponding inference-step settings.
|
| 160 |
+
```python
|
| 161 |
+
import torch
|
| 162 |
+
from diffusers import DiffusionPipeline, DDIMScheduler
|
| 163 |
+
from huggingface_hub import hf_hub_download
|
| 164 |
+
base_model_id = ""runwayml/stable-diffusion-v1-5""
|
| 165 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 166 |
+
# Take 2-steps lora as an example
|
| 167 |
+
ckpt_name = ""Hyper-SD15-2steps-lora.safetensors""
|
| 168 |
+
# Load model.
|
| 169 |
+
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant=""fp16"").to(""cuda"")
|
| 170 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 171 |
+
pipe.fuse_lora()
|
| 172 |
+
# Ensure ddim scheduler timestep spacing set as trailing !!!
|
| 173 |
+
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing=""trailing"")
|
| 174 |
+
prompt=""a photo of a cat""
|
| 175 |
+
image=pipe(prompt=prompt, num_inference_steps=2, guidance_scale=0).images[0]
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
#### Unified LoRA (support 1 to 8 steps inference)
|
| 180 |
+
You can flexibly adjust the number of inference steps and eta value to achieve best performance.
|
| 181 |
+
```python
|
| 182 |
+
import torch
|
| 183 |
+
from diffusers import DiffusionPipeline, TCDScheduler
|
| 184 |
+
from huggingface_hub import hf_hub_download
|
| 185 |
+
base_model_id = ""runwayml/stable-diffusion-v1-5""
|
| 186 |
+
repo_name = ""ByteDance/Hyper-SD""
|
| 187 |
+
ckpt_name = ""Hyper-SD15-1step-lora.safetensors""
|
| 188 |
+
# Load model.
|
| 189 |
+
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant=""fp16"").to(""cuda"")
|
| 190 |
+
pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
|
| 191 |
+
pipe.fuse_lora()
|
| 192 |
+
# Use TCD scheduler to achieve better image quality
|
| 193 |
+
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
| 194 |
+
# Lower eta results in more detail for multi-steps inference
|
| 195 |
+
eta=1.0
|
| 196 |
+
prompt=""a photo of a cat""
|
| 197 |
+
image=pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0, eta=eta).images[0]
|
| 198 |
+
```
|
| 199 |
+
|
| 200 |
+
## ControlNet Usage
|
| 201 |
+
### SDXL-related models
|
| 202 |
+
|
| 203 |
+
#### 2-Steps, 4-Steps, 8-steps LoRA
|
| 204 |
+
Take Canny Controlnet and 2-steps inference as an example:
|
| 205 |
+
```python
|
| 206 |
+
import torch
|
| 207 |
+
from diffusers.utils import load_image
|
| 208 |
+
import numpy as np
|
| 209 |
+
import cv2
|
| 210 |
+
from PIL import Image
|
| 211 |
+
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, DDIMScheduler
|
| 212 |
+
from huggingface_hub import hf_hub_download
|
| 213 |
+
|
| 214 |
+
# Load original image
|
| 215 |
+
image = load_image(""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"")
|
| 216 |
+
image = np.array(image)
|
| 217 |
+
# Prepare Canny Control Image
|
| 218 |
+
low_threshold = 100
|
| 219 |
+
high_threshold = 200
|
| 220 |
+
image = cv2.Canny(image, low_threshold, high_threshold)
|
| 221 |
+
image = image[:, :, None]
|
| 222 |
+
image = np.concatenate([image, image, image], axis=2)
|
| 223 |
+
control_image = Image.fromarray(image)
|
| 224 |
+
control_image.save(""control.png"")
|
| 225 |
+
control_weight = 0.5 # recommended for good generalization
|
| 226 |
+
|
| 227 |
+
# Initialize pipeline
|
| 228 |
+
controlnet = ControlNetModel.from_pretrained(
|
| 229 |
+
""diffusers/controlnet-canny-sdxl-1.0"",
|
| 230 |
+
torch_dtype=torch.float16
|
| 231 |
+
)
|
| 232 |
+
vae = AutoencoderKL.from_pretrained(""madebyollin/sdxl-vae-fp16-fix"", torch_dtype=torch.float16)
|
| 233 |
+
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(""stabilityai/stable-diffusion-xl-base-1.0"", controlnet=controlnet, vae=vae, torch_dtype=torch.float16).to(""cuda"")
|
| 234 |
+
|
| 235 |
+
pipe.load_lora_weights(hf_hub_download(""ByteDance/Hyper-SD"", ""Hyper-SDXL-2steps-lora.safetensors""))
|
| 236 |
+
# Ensure ddim scheduler timestep spacing set as trailing !!!
|
| 237 |
+
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing=""trailing"")
|
| 238 |
+
pipe.fuse_lora()
|
| 239 |
+
image = pipe(""A chocolate cookie"", num_inference_steps=2, image=control_image, guidance_scale=0, controlnet_conditioning_scale=control_weight).images[0]
|
| 240 |
+
image.save('image_out.png')
|
| 241 |
+
```
|
| 242 |
+
|
| 243 |
+
#### Unified LoRA (support 1 to 8 steps inference)
|
| 244 |
+
Take Canny Controlnet as an example:
|
| 245 |
+
```python
|
| 246 |
+
import torch
|
| 247 |
+
from diffusers.utils import load_image
|
| 248 |
+
import numpy as np
|
| 249 |
+
import cv2
|
| 250 |
+
from PIL import Image
|
| 251 |
+
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, TCDScheduler
|
| 252 |
+
from huggingface_hub import hf_hub_download
|
| 253 |
+
|
| 254 |
+
# Load original image
|
| 255 |
+
image = load_image(""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"")
|
| 256 |
+
image = np.array(image)
|
| 257 |
+
# Prepare Canny Control Image
|
| 258 |
+
low_threshold = 100
|
| 259 |
+
high_threshold = 200
|
| 260 |
+
image = cv2.Canny(image, low_threshold, high_threshold)
|
| 261 |
+
image = image[:, :, None]
|
| 262 |
+
image = np.concatenate([image, image, image], axis=2)
|
| 263 |
+
control_image = Image.fromarray(image)
|
| 264 |
+
control_image.save(""control.png"")
|
| 265 |
+
control_weight = 0.5 # recommended for good generalization
|
| 266 |
+
|
| 267 |
+
# Initialize pipeline
|
| 268 |
+
controlnet = ControlNetModel.from_pretrained(
|
| 269 |
+
""diffusers/controlnet-canny-sdxl-1.0"",
|
| 270 |
+
torch_dtype=torch.float16
|
| 271 |
+
)
|
| 272 |
+
vae = AutoencoderKL.from_pretrained(""madebyollin/sdxl-vae-fp16-fix"", torch_dtype=torch.float16)
|
| 273 |
+
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
| 274 |
+
""stabilityai/stable-diffusion-xl-base-1.0"",
|
| 275 |
+
controlnet=controlnet, vae=vae, torch_dtype=torch.float16).to(""cuda"")
|
| 276 |
+
|
| 277 |
+
# Load Hyper-SD15-1step lora
|
| 278 |
+
pipe.load_lora_weights(hf_hub_download(""ByteDance/Hyper-SD"", ""Hyper-SDXL-1step-lora.safetensors""))
|
| 279 |
+
pipe.fuse_lora()
|
| 280 |
+
# Use TCD scheduler to achieve better image quality
|
| 281 |
+
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
| 282 |
+
# Lower eta results in more detail for multi-steps inference
|
| 283 |
+
eta=1.0
|
| 284 |
+
image = pipe(""A chocolate cookie"", num_inference_steps=4, image=control_image, guidance_scale=0, controlnet_conditioning_scale=control_weight, eta=eta).images[0]
|
| 285 |
+
image.save('image_out.png')
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
### SD1.5-related models
|
| 289 |
+
|
| 290 |
+
#### 2-Steps, 4-Steps, 8-steps LoRA
|
| 291 |
+
Take Canny Controlnet and 2-steps inference as an example:
|
| 292 |
+
```python
|
| 293 |
+
import torch
|
| 294 |
+
from diffusers.utils import load_image
|
| 295 |
+
import numpy as np
|
| 296 |
+
import cv2
|
| 297 |
+
from PIL import Image
|
| 298 |
+
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, DDIMScheduler
|
| 299 |
+
|
| 300 |
+
from huggingface_hub import hf_hub_download
|
| 301 |
+
|
| 302 |
+
controlnet_checkpoint = ""lllyasviel/control_v11p_sd15_canny""
|
| 303 |
+
|
| 304 |
+
# Load original image
|
| 305 |
+
image = load_image(""https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/input.png"")
|
| 306 |
+
image = np.array(image)
|
| 307 |
+
# Prepare Canny Control Image
|
| 308 |
+
low_threshold = 100
|
| 309 |
+
high_threshold = 200
|
| 310 |
+
image = cv2.Canny(image, low_threshold, high_threshold)
|
| 311 |
+
image = image[:, :, None]
|
| 312 |
+
image = np.concatenate([image, image, image], axis=2)
|
| 313 |
+
control_image = Image.fromarray(image)
|
| 314 |
+
control_image.save(""control.png"")
|
| 315 |
+
|
| 316 |
+
# Initialize pipeline
|
| 317 |
+
controlnet = ControlNetModel.from_pretrained(controlnet_checkpoint, torch_dtype=torch.float16)
|
| 318 |
+
pipe = StableDiffusionControlNetPipeline.from_pretrained(""runwayml/stable-diffusion-v1-5"", controlnet=controlnet, torch_dtype=torch.float16).to(""cuda"")
|
| 319 |
+
pipe.load_lora_weights(hf_hub_download(""ByteDance/Hyper-SD"", ""Hyper-SD15-2steps-lora.safetensors""))
|
| 320 |
+
pipe.fuse_lora()
|
| 321 |
+
# Ensure ddim scheduler timestep spacing set as trailing !!!
|
| 322 |
+
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing=""trailing"")
|
| 323 |
+
image = pipe(""a blue paradise bird in the jungle"", num_inference_steps=2, image=control_image, guidance_scale=0).images[0]
|
| 324 |
+
image.save('image_out.png')
|
| 325 |
+
```
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
#### Unified LoRA (support 1 to 8 steps inference)
|
| 329 |
+
Take Canny Controlnet as an example:
|
| 330 |
+
```python
|
| 331 |
+
import torch
|
| 332 |
+
from diffusers.utils import load_image
|
| 333 |
+
import numpy as np
|
| 334 |
+
import cv2
|
| 335 |
+
from PIL import Image
|
| 336 |
+
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, TCDScheduler
|
| 337 |
+
from huggingface_hub import hf_hub_download
|
| 338 |
+
|
| 339 |
+
controlnet_checkpoint = ""lllyasviel/control_v11p_sd15_canny""
|
| 340 |
+
|
| 341 |
+
# Load original image
|
| 342 |
+
image = load_image(""https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/input.png"")
|
| 343 |
+
image = np.array(image)
|
| 344 |
+
# Prepare Canny Control Image
|
| 345 |
+
low_threshold = 100
|
| 346 |
+
high_threshold = 200
|
| 347 |
+
image = cv2.Canny(image, low_threshold, high_threshold)
|
| 348 |
+
image = image[:, :, None]
|
| 349 |
+
image = np.concatenate([image, image, image], axis=2)
|
| 350 |
+
control_image = Image.fromarray(image)
|
| 351 |
+
control_image.save(""control.png"")
|
| 352 |
+
|
| 353 |
+
# Initialize pipeline
|
| 354 |
+
controlnet = ControlNetModel.from_pretrained(controlnet_checkpoint, torch_dtype=torch.float16)
|
| 355 |
+
pipe = StableDiffusionControlNetPipeline.from_pretrained(""runwayml/stable-diffusion-v1-5"", controlnet=controlnet, torch_dtype=torch.float16).to(""cuda"")
|
| 356 |
+
# Load Hyper-SD15-1step lora
|
| 357 |
+
pipe.load_lora_weights(hf_hub_download(""ByteDance/Hyper-SD"", ""Hyper-SD15-1step-lora.safetensors""))
|
| 358 |
+
pipe.fuse_lora()
|
| 359 |
+
# Use TCD scheduler to achieve better image quality
|
| 360 |
+
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
|
| 361 |
+
# Lower eta results in more detail for multi-step inference
|
| 362 |
+
eta=1.0
|
| 363 |
+
image = pipe(""a blue paradise bird in the jungle"", num_inference_steps=1, image=control_image, guidance_scale=0, eta=eta).images[0]
|
| 364 |
+
image.save('image_out.png')
|
| 365 |
+
```
|
| 366 |
+
## ComfyUI Usage
|
| 367 |
+
* `Hyper-SDXL-Nsteps-lora.safetensors`: [text-to-image workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SDXL-Nsteps-lora-workflow.json)
|
| 368 |
+
* `Hyper-SD15-Nsteps-lora.safetensors`: [text-to-image workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SD15-Nsteps-lora-workflow.json)
|
| 369 |
+
* `Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors`: [text-to-image workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SDXL-1step-Unet-workflow.json)
|
| 370 |
+
* **REQUIREMENT / INSTALL** for 1-Step SDXL UNet: Please install our [scheduler folder](https://huggingface.co/ByteDance/Hyper-SD/tree/main/comfyui/ComfyUI-HyperSDXL1StepUnetScheduler) into your `ComfyUI/custom_nodes` to enable sampling from timestep 800 instead of 999.
|
| 371 |
+
* i.e. making sure the `ComfyUI/custom_nodes/ComfyUI-HyperSDXL1StepUnetScheduler` folder exists.
|
| 372 |
+
* For more details, please refer to our [technical report](https://arxiv.org/abs/2404.13686).
|
| 373 |
+
* `Hyper-SD15-1step-lora.safetensors`: [text-to-image workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SD15-1step-unified-lora-workflow.json)
|
| 374 |
+
* `Hyper-SDXL-1step-lora.safetensors`: [text-to-image workflow](https://huggingface.co/ByteDance/Hyper-SD/blob/main/comfyui/Hyper-SDXL-1step-unified-lora-workflow.json)
|
| 375 |
+
* **REQUIREMENT / INSTALL** for 1-Step Unified LoRAs: Please install [ComfyUI-TCD](https://github.com/JettHu/ComfyUI-TCD) into your `ComfyUI/custom_nodes` to enable the TCDScheduler with support for different inference steps (1~8) using a single checkpoint.
|
| 376 |
+
* i.e. making sure the `ComfyUI/custom_nodes/ComfyUI-TCD` folder exists.
|
| 377 |
+
* You're encouraged to adjust the eta parameter in TCDScheduler to get better results.
|
| 378 |
+
|
| 379 |
+
## Citation
|
| 380 |
+
```bibtex
|
| 381 |
+
@misc{ren2024hypersd,
|
| 382 |
+
title={Hyper-SD: Trajectory Segmented Consistency Model for Efficient Image Synthesis},
|
| 383 |
+
author={Yuxi Ren and Xin Xia and Yanzuo Lu and Jiacheng Zhang and Jie Wu and Pan Xie and Xing Wang and Xuefeng Xiao},
|
| 384 |
+
year={2024},
|
| 385 |
+
eprint={2404.13686},
|
| 386 |
+
archivePrefix={arXiv},
|
| 387 |
+
primaryClass={cs.CV}
|
| 388 |
+
}
|
| 389 |
+
```","{""id"": ""ByteDance/Hyper-SD"", ""author"": ""ByteDance"", ""sha"": ""bc08d970a87c74c71209491d64e3525845698863"", ""last_modified"": ""2024-12-05 09:02:21+00:00"", ""created_at"": ""2024-04-20 06:34:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 134623, ""downloads_all_time"": null, ""likes"": 1183, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": ""warm"", ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""lora"", ""text-to-image"", ""stable-diffusion"", ""flux"", ""arxiv:2404.13686"", ""base_model:black-forest-labs/FLUX.1-dev"", ""base_model:adapter:black-forest-labs/FLUX.1-dev"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: black-forest-labs/FLUX.1-dev\nlibrary_name: diffusers\ntags:\n- lora\n- text-to-image\n- stable-diffusion\n- flux\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-FLUX.1-dev-16steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-FLUX.1-dev-8steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-12steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-1step-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-2steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-4steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-8steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD15-8steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD3-16steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD3-4steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SD3-8steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-12steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-1step-Unet.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-1step-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-2steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-4steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-8steps-CFG-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Hyper-SDXL-8steps-lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/ComfyUI-HyperSDXL1StepUnetScheduler/__init__.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/ComfyUI-HyperSDXL1StepUnetScheduler/node.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/Hyper-SD15-1step-unified-lora-workflow.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/Hyper-SD15-Nsteps-lora-workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/Hyper-SDXL-1step-Unet-workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/Hyper-SDXL-1step-unified-lora-workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='comfyui/Hyper-SDXL-Nsteps-lora-workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hypersd_tearser.jpg', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ByteDance/Hyper-FLUX-8Steps-LoRA"", ""radames/Real-Time-Latent-Consistency-Model"", ""ByteDance/Hyper-SDXL-1Step-T2I"", ""multimodalart/flux-outpainting"", ""ByteDance/Hyper-SD15-Scribble"", ""r3gm/DiffuseCraft"", ""John6666/DiffuseCraftMod"", ""multimodalart/one-step-comparison"", ""John6666/votepurchase-multiple-model"", ""fantos/flx8lora"", ""gokaygokay/Flux-TRELLIS"", ""multimodalart/low-step-flux-comparison"", ""linoyts/fast-FLUX.1-Redux-dev"", ""doevent/FLUX.1-merged"", ""radames/InstantStyle-Hyper-SD"", ""rf-inversion/RF-inversion"", ""fffiloni/ReNO"", ""eienmojiki/AnyDiffuse"", ""ariG23498/flux-edit"", ""Heartsync/FLUX-Vision"", ""radames/InstantStyle-Hyper-SDXL"", ""ginigen/FLUX-Eternity"", ""Menyu/DiffuseCraftMod"", ""John6666/sdxl-to-diffusers-v2"", ""mantrakp/aai"", ""John6666/sdxl-to-diffusers-v3"", ""zerhero/DiffuseCraft"", ""HRJ360/AI-STORYTELLER"", ""John6666/safetensors_to_diffusers"", ""bobber/DiffuseCraft"", ""fcyai/Hyper-FLUX-8Steps-LoRA"", ""John6666/sdxl-to-diffusers-v2p"", ""alsaeth/ByteDance-Hyper-SD"", ""EVA787797/kiii44545454"", ""John6666/testvp"", ""John6666/sdxl-to-diffusers-v2-cliptest"", ""K00B404/Hyper-SDXL-1Step-T2I-cpu"", ""John6666/gradio_uitest1"", ""linoyts/Stable-Flow"", ""Uthar/John6666_sdxl-to-diffusers-v3"", ""shivguddadmath/Hyper-SDXL"", ""Falln87/Hyper-SD15-Scribble"", ""FallnAI/HyperSD15-Scribble"", ""mba07m/Hackathon3D"", ""Nymbo/sdxl-to-diffusers-v2"", ""banan1233op/hypersd-sdxl"", ""Iwaku-Real/Hyper-SDXL-1Step-T2I"", ""xbbd/ByteDance-Hyper-SD"", ""HuggingFaceSupport/ByteDance-Hyper-SD"", ""rencent/ByteDance-Hyper-SD"", ""Raumkommander/Hyper-FLUX-8Steps-LoRA"", ""marsyao/Hyper-FLUX-8Steps-LoRA"", ""johnstonkaren314/ByteDance-Hyper-SD"", ""AnonDev/ByteDance-Hyper-SD"", ""Naranko/ByteDance-Hyper-SD"", ""bruvvyluvvy/Hyper-FLUX-8Steps-LoRA"", ""Afrinetwork/ig"", ""somukandula/ByteDance-Hyper-SD"", ""Aditya2034/abc21"", ""Larm/ByteDance-Hyper-SD"", ""a2post/Hyper-FLUX-8Steps-LoRA"", ""vijaykumar8560/vijayimage"", ""K00B404/Hyper-FLUX-8Steps-LoRA_CPU"", ""nightfury/Hyper-FLUX-8Steps-LoRA"", ""Evansville/ByteDance-Hyper-SD"", ""Fili2a2/DIGITAL-PROSPECTIVE-Hyper-SD"", ""Afrinetwork/ig1"", ""GQ123QWE/ByteDance-Hyper-SD"", ""Vivawaves/Hyper-FLUX-8Steps-LoRA"", ""JeCabrera/AI-STORYTELLER2"", ""Funpee/Hyper-FLUX-8Steps-LoRA"", ""callzz/sdxl-to-diffusers-v2"", ""Nymbo/flux-outpainting"", ""JohnyLahente/flux-outpainting"", ""huanhoang/flux-outpainting"", ""xbarusui/testsd"", ""kheloo/Hyper-FLUX-8Steps-LoRA"", ""SteelBerserker9346/flx8lora"", ""sominjj/flx8lora"", ""xkstudio/flx8lora"", ""khelonaseer1/FLUX.1-merged"", ""aminss29/flux-outpainting"", ""John6666/diffusers_lora_error_test1"", ""Ihatenamesforever/Hyper-FLUX-8Steps-LoRA"", ""Kutches/sdxl-to-diffusers-v32"", ""WhiteAiZ/sdxl-to-diffusers-v32"", ""LAJILAODEEAIQ/officechat-DiffuseCraftMod"", ""ShahbazAlam/Hyper-FLUX-8Steps-LoRA"", ""eMILF2/real-time-model"", ""Anupam251272/Hyper-SD"", ""SpyC0der77/sdxl"", ""SpyC0der77/Model-lora"", 
""LPX55/FLUX.1-Redux_Turbo"", ""supratimrana/ByteDance-Hyper-SD"", ""DileepEravada/ByteDance-Hyper-SD"", ""fluxai111/ByteDance-Hyper-SD"", ""phamvkhai20/api-generate-image"", ""eienmojiki/DiffuseCraftMod"", ""John6666/flux-to-diffusers-zero-test"", ""CyberSys/Flux-TRELLIS""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-05 09:02:21+00:00"", ""cardData"": ""base_model: black-forest-labs/FLUX.1-dev\nlibrary_name: diffusers\ntags:\n- lora\n- text-to-image\n- stable-diffusion\n- flux\ninference: false"", ""transformersInfo"": null, ""_id"": ""6623620e439565130935a9cb"", ""modelId"": ""ByteDance/Hyper-SD"", ""usedStorage"": 28448937470}",0,https://huggingface.co/ChenDY/NitroFusion,1,"https://huggingface.co/HopeTD/consulting, https://huggingface.co/gaherfuyhj/ivDaoo, https://huggingface.co/eirikrawr/lenchmobno, https://huggingface.co/ALT2/ssssss, https://huggingface.co/saber21/ml, https://huggingface.co/Andres123151/Eladio1, https://huggingface.co/luispine/wildfireUV, https://huggingface.co/yurslupy/Renaissance23, https://huggingface.co/fingerprinted/hngfds, https://huggingface.co/rarakura/opune",10,,0,https://huggingface.co/LPX55/FLUX.1-merged_uncensored,1,"ByteDance/Hyper-FLUX-8Steps-LoRA, ByteDance/Hyper-SD15-Scribble, ByteDance/Hyper-SDXL-1Step-T2I, John6666/DiffuseCraftMod, John6666/votepurchase-multiple-model, fantos/flx8lora, gokaygokay/Flux-TRELLIS, linoyts/fast-FLUX.1-Redux-dev, multimodalart/flux-outpainting, multimodalart/low-step-flux-comparison, multimodalart/one-step-comparison, r3gm/DiffuseCraft",12
|
| 390 |
+
ChenDY/NitroFusion,"---
|
| 391 |
+
base_model:
|
| 392 |
+
- tianweiy/DMD2
|
| 393 |
+
- ByteDance/Hyper-SD
|
| 394 |
+
- stabilityai/stable-diffusion-xl-base-1.0
|
| 395 |
+
pipeline_tag: text-to-image
|
| 396 |
+
library_name: diffusers
|
| 397 |
+
tags:
|
| 398 |
+
- text-to-image
|
| 399 |
+
- stable-diffusion
|
| 400 |
+
- sdxl
|
| 401 |
+
- adversarial diffusion distillation
|
| 402 |
+
---
|
| 403 |
+
# NitroFusion
|
| 404 |
+
<!-- > [**NitroFusion: High-Fidelity Single-Step Diffusion through Dynamic Adversarial Training**](), -->
|
| 405 |
+
> **NitroFusion: High-Fidelity Single-Step Diffusion through Dynamic Adversarial Training**
|
| 406 |
+
>
|
| 407 |
+
> Dar-Yen Chen, Hmrishav Bandyopadhyay, Kai Zou, Yi-Zhe Song
|
| 408 |
+
|
| 409 |
+
[[arXiv Paper]](https://arxiv.org/abs/2412.02030) [[Project Page]](https://chendaryen.github.io/NitroFusion.github.io/)
|
| 410 |
+
|
| 411 |
+
<!-- GitHub Repository: []() -->
|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
## News
|
| 417 |
+
* 06 Jan 2025: ComfyUI checkpoints `nitrosd-realism_comfyui.safetensors` and `nitrosd-vibrant_comfyui.safetensors`, as well as a [workflow](https://github.com/ChenDarYen/ComfyUI-TimestepShiftModel) are now released.
|
| 418 |
+
* 04 Dec 2024: [Paper](https://arxiv.org/abs/2412.02030) is released on arXiv, and the [project page](https://chendaryen.github.io/NitroFusion.github.io/) is now public.
|
| 419 |
+
* 30 Nov 2024: Our single-step text-to-image demo is publicly available on [🤗 Hugging Face Space](https://huggingface.co/spaces/ChenDY/NitroFusion_1step_T2I).
|
| 420 |
+
* 29 Nov 2024: Released two checkpoints: **NitroSD-Realism** and **NitroSD-Vibrant**.
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
## Online Demos
|
| 424 |
+
NitroFusion single-step Text-to-Image demo hosted on [🤗 Hugging Face Space](https://huggingface.co/spaces/ChenDY/NitroFusion_1step_T2I)
|
| 425 |
+
|
| 426 |
+
## Model Overview
|
| 427 |
+
- `nitrosd-realism_unet.safetensors`: Produces photorealistic images with fine details.
|
| 428 |
+
- `nitrosd-vibrant_unet.safetensors`: Offers vibrant, saturated color characteristics.
|
| 429 |
+
- Both models support 1 to 4 inference steps.
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
## Usage
|
| 433 |
+
|
| 434 |
+
First, we need to implement the scheduler with timestep shift for multi-step inference:
|
| 435 |
+
```python
|
| 436 |
+
from diffusers import LCMScheduler
|
| 437 |
+
class TimestepShiftLCMScheduler(LCMScheduler):
|
| 438 |
+
def __init__(self, *args, shifted_timestep=250, **kwargs):
|
| 439 |
+
super().__init__(*args, **kwargs)
|
| 440 |
+
self.register_to_config(shifted_timestep=shifted_timestep)
|
| 441 |
+
def set_timesteps(self, *args, **kwargs):
|
| 442 |
+
super().set_timesteps(*args, **kwargs)
|
| 443 |
+
self.origin_timesteps = self.timesteps.clone()
|
| 444 |
+
self.shifted_timesteps = (self.timesteps * self.config.shifted_timestep / self.config.num_train_timesteps).long()
|
| 445 |
+
self.timesteps = self.shifted_timesteps
|
| 446 |
+
def step(self, model_output, timestep, sample, generator=None, return_dict=True):
|
| 447 |
+
if self.step_index is None:
|
| 448 |
+
self._init_step_index(timestep)
|
| 449 |
+
self.timesteps = self.origin_timesteps
|
| 450 |
+
output = super().step(model_output, timestep, sample, generator, return_dict)
|
| 451 |
+
self.timesteps = self.shifted_timesteps
|
| 452 |
+
return output
|
| 453 |
+
```
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
We can then use the diffusers pipeline:
|
| 457 |
+
```python
|
| 458 |
+
import torch
|
| 459 |
+
from diffusers import DiffusionPipeline, UNet2DConditionModel
|
| 460 |
+
from huggingface_hub import hf_hub_download
|
| 461 |
+
from safetensors.torch import load_file
|
| 462 |
+
# Load model.
|
| 463 |
+
base_model_id = ""stabilityai/stable-diffusion-xl-base-1.0""
|
| 464 |
+
repo = ""ChenDY/NitroFusion""
|
| 465 |
+
# NitroSD-Realism
|
| 466 |
+
ckpt = ""nitrosd-realism_unet.safetensors""
|
| 467 |
+
unet = UNet2DConditionModel.from_config(base_model_id, subfolder=""unet"").to(""cuda"", torch.float16)
|
| 468 |
+
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=""cuda""))
|
| 469 |
+
scheduler = TimestepShiftLCMScheduler.from_pretrained(base_model_id, subfolder=""scheduler"", shifted_timestep=250)
|
| 470 |
+
scheduler.config.original_inference_steps = 4
|
| 471 |
+
# # NitroSD-Vibrant
|
| 472 |
+
# ckpt = ""nitrosd-vibrant_unet.safetensors""
|
| 473 |
+
# unet = UNet2DConditionModel.from_config(base_model_id, subfolder=""unet"").to(""cuda"", torch.float16)
|
| 474 |
+
# unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=""cuda""))
|
| 475 |
+
# scheduler = TimestepShiftLCMScheduler.from_pretrained(base_model_id, subfolder=""scheduler"", shifted_timestep=500)
|
| 476 |
+
# scheduler.config.original_inference_steps = 4
|
| 477 |
+
pipe = DiffusionPipeline.from_pretrained(
|
| 478 |
+
base_model_id,
|
| 479 |
+
unet=unet,
|
| 480 |
+
scheduler=scheduler,
|
| 481 |
+
torch_dtype=torch.float16,
|
| 482 |
+
variant=""fp16"",
|
| 483 |
+
).to(""cuda"")
|
| 484 |
+
prompt = ""a photo of a cat""
|
| 485 |
+
image = pipe(
|
| 486 |
+
prompt=prompt,
|
| 487 |
+
num_inference_steps=1, # NitroSD-Realism and -Vibrant both support 1 to 4 inference steps.
|
| 488 |
+
guidance_scale=0,
|
| 489 |
+
).images[0]
|
| 490 |
+
```
|
| 491 |
+
|
| 492 |
+
## ComfyUI Usage
|
| 493 |
+
|
| 494 |
+
1. Download `nitrosd-realism_comfyui.safetensors` and `nitrosd-vibrant_comfyui.safetensors`, and place them in `ComfyUI/models/checkpoints`.
|
| 495 |
+
2. Clone the [ComfyUI-TimestepShiftModel](https://github.com/ChenDarYen/ComfyUI-TimestepShiftModel) repository into `ComfyUI/custom_nodes`.
|
| 496 |
+
3. Play with the [workflow](https://github.com/ChenDarYen/ComfyUI-TimestepShiftModel/blob/main/ComfyUI_NitroSD_workflow.json)!
|
| 497 |
+
|
| 498 |
+
## License
|
| 499 |
+
|
| 500 |
+
NitroSD-Realism is released under [cc-by-nc-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/deed.en), following its base model *DMD2*.
|
| 501 |
+
|
| 502 |
+
NitroSD-Vibrant is released under [openrail++](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md).
|
| 503 |
+
|
| 504 |
+
<!-- ## Contact
|
| 505 |
+
|
| 506 |
+
Feel free to contact us if you have any questions about the paper!
|
| 507 |
+
|
| 508 |
+
Dar-Yen Chen [@surrey.ac.uk](mailto:@surrey.ac.uk)
|
| 509 |
+
|
| 510 |
+
## Citation
|
| 511 |
+
|
| 512 |
+
If you find NitroFusion useful or relevant to your research, please kindly cite our papers:
|
| 513 |
+
|
| 514 |
+
```bib
|
| 515 |
+
|
| 516 |
+
``` -->
|
| 517 |
+
","{""id"": ""ChenDY/NitroFusion"", ""author"": ""ChenDY"", ""sha"": ""ce7256bf7c74b3968279921d1267797525c31d28"", ""last_modified"": ""2025-01-06 23:14:58+00:00"", ""created_at"": ""2024-11-30 00:13:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 472, ""downloads_all_time"": null, ""likes"": 95, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""stable-diffusion"", ""sdxl"", ""adversarial diffusion distillation"", ""arxiv:2412.02030"", ""base_model:ByteDance/Hyper-SD"", ""base_model:finetune:ByteDance/Hyper-SD"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tianweiy/DMD2\n- ByteDance/Hyper-SD\n- stabilityai/stable-diffusion-xl-base-1.0\nlibrary_name: diffusers\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- sdxl\n- adversarial diffusion distillation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/banner.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitrosd-realism_comfyui.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitrosd-realism_unet.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitrosd-vibrant_comfyui.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='nitrosd-vibrant_unet.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ChenDY/NitroFusion_1step_T2I"", ""lawwantsin/ChenDY-NitroFusion"", ""Jasondwqdqw/ChenDY-NitroFusion"", ""fatbeewan/ChenDY-NitroFusion"", ""danuc/ILikeAI"", ""Shandin/ChenDY-NitroFusion"", ""Helonx/ChenDY-NitroFusion"", ""Kruderis/ChenDY-NitroFusion"", ""FlappyMeese/NitroFusion_1step_T2I""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-06 23:14:58+00:00"", ""cardData"": ""base_model:\n- tianweiy/DMD2\n- ByteDance/Hyper-SD\n- stabilityai/stable-diffusion-xl-base-1.0\nlibrary_name: diffusers\npipeline_tag: text-to-image\ntags:\n- text-to-image\n- stable-diffusion\n- sdxl\n- adversarial diffusion distillation"", ""transformersInfo"": null, ""_id"": ""674a58a90c9aadbd95b19b5f"", ""modelId"": ""ChenDY/NitroFusion"", ""usedStorage"": 26004602125}",1,,0,,0,,0,,0,"ChenDY/NitroFusion_1step_T2I, FlappyMeese/NitroFusion_1step_T2I, Helonx/ChenDY-NitroFusion, Jasondwqdqw/ChenDY-NitroFusion, Kruderis/ChenDY-NitroFusion, Shandin/ChenDY-NitroFusion, danuc/ILikeAI, fatbeewan/ChenDY-NitroFusion, huggingface/InferenceSupport/discussions/new?title=ChenDY/NitroFusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BChenDY%2FNitroFusion%5D(%2FChenDY%2FNitroFusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, lawwantsin/ChenDY-NitroFusion",10
|
Inkpunk-Diffusion_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,23 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Envvi/Inkpunk-Diffusion,"---
|
| 3 |
+
license: creativeml-openrail-m
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- stable-diffusion
|
| 8 |
+
- text-to-image
|
| 9 |
+
- diffusers
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# Inkpunk Diffusion
|
| 13 |
+
|
| 14 |
+
Fine-tuned Stable Diffusion model trained with DreamBooth. Vaguely inspired by Gorillaz, FLCL, and Yoji Shinkawa. Use **_nvinkpunk_** in your prompts.
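Below is a minimal diffusers sketch of how the trigger token can be used; the prompt and sampling settings are illustrative placeholders, and the standard `StableDiffusionPipeline` text-to-image API is assumed.

```python
import torch
from diffusers import StableDiffusionPipeline

# Minimal sketch: load the model and trigger the style with the nvinkpunk token.
pipe = StableDiffusionPipeline.from_pretrained(
    'Envvi/Inkpunk-Diffusion', torch_dtype=torch.float16
).to('cuda')

# Placeholder prompt; keep the nvinkpunk trigger token to activate the style.
prompt = 'nvinkpunk portrait of a robot musician, vibrant colors'
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save('inkpunk_out.png')
```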
|
| 15 |
+
|
| 16 |
+
# Gradio
|
| 17 |
+
|
| 18 |
+
We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Inkpunk-Diffusion:
|
| 19 |
+
[](https://huggingface.co/spaces/akhaliq/Inkpunk-Diffusion)
|
| 20 |
+
|
| 21 |
+
# Sample images
|
| 22 |
+

|
| 23 |
+
","{""id"": ""Envvi/Inkpunk-Diffusion"", ""author"": ""Envvi"", ""sha"": ""b491aaca6d312daf751e76dbf2b3eedf8cb91c7b"", ""last_modified"": ""2022-11-29 16:31:21+00:00"", ""created_at"": ""2022-11-25 06:06:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1198, ""downloads_all_time"": null, ""likes"": 983, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- diffusers"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Inkpunk-Diffusion-v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='args.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inkpunk-diffusion-v1.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inkpunk-v1-samples.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inkpunk-v2-samples-1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='inkpunk-v2-samples-2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""akhaliq/Inkpunk-Diffusion"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", 
""Daniela-C/6x_Image_diffusion"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""John6666/ToyWorld4"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""Zephyr65/Envvi-Inkpunk-Diffusion"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""Harshveer/Finetuned_Diffusion_Max"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/AnimeArtmodels2"", ""animeartstudio/AnimeModels"", ""karol99/Envvi-Inkpunk-Diffusion"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""Kvikontent/open-text2image-leaderboard"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""xkhaloda/Envvi-Inkpunk-Diffusion"", ""darkartsaibwd/Envvi-Inkpunk-Diffusion"", ""ygtrfed/pp-web-ui"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""ennov8ion/Landscapes-models"", ""Shad0ws/ImageModelTestEnvironment"", ""sohoso/anime348756"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""K00B404/generate_many_models"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""Earendel/Inkpunk-Diffusion"", ""izumo092/test-7"", ""johnsonyue/Inkpunk-Diffusion"", ""vladocar/Inkpunk-Diffusion"", ""phanstudio/dreamlike-art-dreamlike-diffusion-1.0"", ""BerkTheBurrito/Envvi-Inkpunk-Diffusion-ForkbyBerk"", ""ARCjeanch/Envvi-Inkpunk-Diffusion"", ""swinwappy/Envvi-Inkpunk-Diffusion"", ""kiankiAN0099/Envvi-Inkpunk-Diffusion"", ""Mogrot/Envvi-Inkpunk-Diffusion"", ""VladBV/Envvi-Inkpunk-Diffusion"", ""Masterblah/Envvi-Inkpunk-Diffusion"", ""ISPA/Envvi-Inkpunk-Diffusion"", ""theblackcat/SpdrMn-Inkpunk-Diffusion"", ""ligalaita/Envvi-Inkpunk-Diffusion"", ""ennov8ion/abstractart-models"", ""ennov8ion/Scifiart-Models"", ""ennov8ion/interior-models"", ""ennov8ion/room-interior-models"", ""animeartstudio/AnimeArtModels1"", ""Yntec/top_100_diffusion"", ""SENSEI-FF/Envvi-Inkpunk-Diffusion""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-11-29 16:31:21+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\n- diffusers"", ""transformersInfo"": null, ""_id"": ""63805b5a54b1953f5341240a"", ""modelId"": ""Envvi/Inkpunk-Diffusion"", ""usedStorage"": 28191407827}",0,,0,"https://huggingface.co/jepz/isaac, https://huggingface.co/ephmecx/zws",2,,0,,0,"Daniela-C/6x_Image_diffusion, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/ToyWorld4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, 
Yntec/blitz_diffusion, akhaliq/Inkpunk-Diffusion, huggingface/InferenceSupport/discussions/new?title=Envvi/Inkpunk-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BEnvvi%2FInkpunk-Diffusion%5D(%2FEnvvi%2FInkpunk-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",14
|
Lag-Llama_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,100 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
time-series-foundation-models/Lag-Llama,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
tags:
|
| 5 |
+
- time series
|
| 6 |
+
- forecasting
|
| 7 |
+
- pretrained models
|
| 8 |
+
- foundation models
|
| 9 |
+
- time series foundation models
|
| 10 |
+
- time-series
|
| 11 |
+
pipeline_tag: time-series-forecasting
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
# Lag-Llama: Towards Foundation Models for Probabilistic Time Series Forecasting
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+
Lag-Llama is the <b>first open-source foundation model for time series forecasting</b>!
|
| 19 |
+
|
| 20 |
+
[[Tweet Thread](https://twitter.com/arjunashok37/status/1755261111233114165)]
|
| 21 |
+
|
| 22 |
+
[[Model Weights](https://huggingface.co/time-series-foundation-models/Lag-Llama)] [[Colab Demo 1: Zero-Shot Forecasting](https://colab.research.google.com/drive/1DRAzLUPxsd-0r8b-o4nlyFXrjw_ZajJJ?usp=sharing)] [[Colab Demo 2: (Preliminary Finetuning)](https://colab.research.google.com/drive/1uvTmh-pe1zO5TeaaRVDdoEWJ5dFDI-pA?usp=sharing)]
|
| 23 |
+
|
| 24 |
+
[[Paper](https://arxiv.org/abs/2310.08278)]
|
| 25 |
+
|
| 26 |
+
[[Video](https://www.youtube.com/watch?v=Mf2FOzDPxck)]
|
| 27 |
+
____
|
| 28 |
+
|
| 29 |
+
<b>Updates</b>:
|
| 30 |
+
|
| 31 |
+
* **16-Apr-2024**: Released pretraining and finetuning scripts to replicate the experiments in the paper. See [Reproducing Experiments in the Paper](https://github.com/time-series-foundation-models/lag-llama?tab=readme-ov-file#reproducing-experiments-in-the-paper) for details.
|
| 32 |
+
* **9-Apr-2024**: We have released a 15-minute video 🎥 on Lag-Llama on [YouTube](https://www.youtube.com/watch?v=Mf2FOzDPxck).
|
| 33 |
+
* **5-Apr-2024**: Added a [section](https://colab.research.google.com/drive/1DRAzLUPxsd-0r8b-o4nlyFXrjw_ZajJJ?authuser=1#scrollTo=Mj9LXMpJ01d7&line=6&uniqifier=1) in Colab Demo 1 on the importance of tuning the context length for zero-shot forecasting. Added a [best practices section](https://github.com/time-series-foundation-models/lag-llama?tab=readme-ov-file#best-practices) in the README; added recommendations for finetuning. These recommendations will be demonstrated with an example in [Colab Demo 2](https://colab.research.google.com/drive/1uvTmh-pe1zO5TeaaRVDdoEWJ5dFDI-pA?usp=sharing) soon.
|
| 34 |
+
* **4-Apr-2024**: We have updated our requirements file with new versions of certain packages. Please update/recreate your environments if you have previously used the code locally.
|
| 35 |
+
* **7-Mar-2024**: We have released a preliminary [Colab Demo 2](https://colab.research.google.com/drive/1uvTmh-pe1zO5TeaaRVDdoEWJ5dFDI-pA?usp=sharing) for finetuning. Please note this is a preliminary tutorial. We recommend taking a look at the best practices if you are finetuning the model or using it for benchmarking.
|
| 36 |
+
* **17-Feb-2024**: We have released a new updated [Colab Demo 1](https://colab.research.google.com/drive/1DRAzLUPxsd-0r8b-o4nlyFXrjw_ZajJJ?usp=sharing) for zero-shot forecasting that shows how one can load time series of different formats.
|
| 37 |
+
* **7-Feb-2024**: We released Lag-Llama, with open-source model checkpoints and a Colab Demo for zero-shot forecasting.
|
| 38 |
+
|
| 39 |
+
____
|
| 40 |
+
|
| 41 |
+
**Current Features**:
|
| 42 |
+
|
| 43 |
+
💫 <b>Zero-shot forecasting</b> on a dataset of <b>any frequency</b> for <b>any prediction length</b>, using <a href=""https://colab.research.google.com/drive/1DRAzLUPxsd-0r8b-o4nlyFXrjw_ZajJJ?usp=sharing"" target=""_blank"">Colab Demo 1.</a><br/>
|
| 44 |
+
|
| 45 |
+
💫 <b>Finetuning</b> on a dataset using [Colab Demo 2](https://colab.research.google.com/drive/1uvTmh-pe1zO5TeaaRVDdoEWJ5dFDI-pA?usp=sharing).
|
| 46 |
+
|
| 47 |
+
💫 <b>Reproducing</b> experiments in the paper using the released scripts. See [Reproducing Experiments in the Paper](https://github.com/time-series-foundation-models/lag-llama?tab=readme-ov-file#reproducing-experiments-in-the-paper) for details.
|
| 48 |
+
|
| 49 |
+
**Note**: Please see the [best practices section](https://github.com/time-series-foundation-models/lag-llama?tab=readme-ov-file#best-practices) when using the model for zero-shot prediction and finetuning.
|
| 50 |
+
|
| 51 |
+
____
|
| 52 |
+
|
| 53 |
+
## Reproducing Experiments in the Paper
|
| 54 |
+
|
| 55 |
+
To replicate the pretraining setup used in the paper, please see [the pretraining script](scripts/pretrain.sh). Once a model is pretrained, instructions to finetune it with the setup in the paper can be found in [the finetuning script](scripts/finetune.sh).
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
## Best Practices
|
| 59 |
+
|
| 60 |
+
Here are some general tips in using Lag-Llama.
|
| 61 |
+
<!-- We recommend reading the [paper](https://arxiv.org/abs/2310.08278) for all details about the model. -->
|
| 62 |
+
|
| 63 |
+
### General Information
|
| 64 |
+
|
| 65 |
+
* Lag-Llama is a **probabilistic** forecasting model trained to output a probability distribution for each timestep to be predicted. For your own specific use case, we would recommend benchmarking the zero-shot performance of the model on your data first, and then finetuning if necessary. As we show in our paper, Lag-Llama has strong zero-shot capabilities, but performs best when finetuned. The more data you finetune on, the better. For specific tips on applying the model zero-shot or on finetuning, please refer to the sections below.
|
| 66 |
+
|
| 67 |
+
#### Zero-Shot Forecasting
|
| 68 |
+
|
| 69 |
+
* Importantly, we recommend trying different **context lengths** (starting from $32$ which it was trained on) and identifying what works best for your data. As we show in [this section of the zero-shot forecasting demo](https://colab.research.google.com/drive/1DRAzLUPxsd-0r8b-o4nlyFXrjw_ZajJJ?authuser=1#scrollTo=Mj9LXMpJ01d7&line=6&uniqifier=1), the model's zero-shot performance improves as the context length is increased, until a certain context length which may be specific to your data. Further, we recommend enabling RoPE scaling for the model to work well with context lengths larger than what it was trained on.
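For orientation, a rough zero-shot sketch in the spirit of the Colab demo is given below. The `LagLlamaEstimator` import path, its keyword arguments, and the checkpoint-hyperparameter layout follow the repository's demo and may differ across versions; the toy dataset and forecast horizon are placeholders.

```python
import torch
from gluonts.dataset.common import ListDataset
from gluonts.evaluation import make_evaluation_predictions
from lag_llama.gluon.estimator import LagLlamaEstimator  # from the lag-llama GitHub repo (assumed path)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load('lag-llama.ckpt', map_location=device)     # checkpoint from this model repo
model_kwargs = ckpt['hyper_parameters']['model_kwargs']      # layout assumed from the Colab demo

# Placeholder dataset: any GluonTS-compatible dataset works here.
dataset = ListDataset([{'start': '2020-01-01', 'target': list(range(200))}], freq='H')

# Try several context lengths (32, 64, 128, ...) and keep whichever forecasts your data best.
estimator = LagLlamaEstimator(
    ckpt_path='lag-llama.ckpt',
    prediction_length=24,                    # placeholder forecast horizon
    context_length=32,                       # 32 is what the model was trained with
    input_size=model_kwargs['input_size'],
    n_layer=model_kwargs['n_layer'],
    n_embd_per_head=model_kwargs['n_embd_per_head'],
    n_head=model_kwargs['n_head'],
    scaling=model_kwargs['scaling'],
    time_feat=model_kwargs['time_feat'],
)

predictor = estimator.create_predictor(
    estimator.create_transformation(), estimator.create_lightning_module()
)
forecast_it, ts_it = make_evaluation_predictions(dataset=dataset, predictor=predictor, num_samples=100)
forecasts = list(forecast_it)                # probabilistic sample paths per series
```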
|
| 70 |
+
|
| 71 |
+
#### Fine-Tuning
|
| 72 |
+
|
| 73 |
+
If you are trying to **benchmark** the performance of the model under finetuning, or trying to obtain maximum performance from the model:
|
| 74 |
+
|
| 75 |
+
* We recommend tuning two important hyperparameters for each dataset that you finetune on: the **context length** (suggested values: $32$, $64$, $128$, $256$, $512$, $1024$) and the **learning rate** (suggested values: $10^{-2}$, $5 * 10^{-3}$, $10^{-3}$, $5 * 10^{-3}$, $1 * 10^{-4}$, $5 * 10^{-4}$).
|
| 76 |
+
* We also highly recommend using a validation split of your dataset to early stop your model, with an early stopping patience of 50 epochs.
|
| 77 |
+
|
| 78 |
+
## Contact
|
| 79 |
+
|
| 80 |
+
We are dedicated to ensuring the reproducibility of our results, and would be happy to help clarify questions about benchmarking our model or about the experiments in the paper.
|
| 81 |
+
The quickest way to reach us would be by email. Please email **both**:
|
| 82 |
+
1. [Arjun Ashok](https://ashok-arjun.github.io/) - arjun [dot] ashok [at] servicenow [dot] com
|
| 83 |
+
2. [Kashif Rasul](https://scholar.google.de/citations?user=cfIrwmAAAAAJ&hl=en) - kashif [dot] rasul [at] gmail [dot] com
|
| 84 |
+
|
| 85 |
+
If you have questions about the model usage or code, or run into specific errors (e.g. using it with your own dataset), it would be best to create an issue in the GitHub repository.
|
| 86 |
+
|
| 87 |
+
## Citing this work
|
| 88 |
+
|
| 89 |
+
Please use the following BibTeX entry to cite Lag-Llama.
|
| 90 |
+
|
| 91 |
+
```
|
| 92 |
+
@misc{rasul2024lagllama,
|
| 93 |
+
title={Lag-Llama: Towards Foundation Models for Probabilistic Time Series Forecasting},
|
| 94 |
+
author={Kashif Rasul and Arjun Ashok and Andrew Robert Williams and Hena Ghonia and Rishika Bhagwatkar and Arian Khorasani and Mohammad Javad Darvishi Bayazi and George Adamopoulos and Roland Riachi and Nadhir Hassen and Marin Biloš and Sahil Garg and Anderson Schneider and Nicolas Chapados and Alexandre Drouin and Valentina Zantedeschi and Yuriy Nevmyvaka and Irina Rish},
|
| 95 |
+
year={2024},
|
| 96 |
+
eprint={2310.08278},
|
| 97 |
+
archivePrefix={arXiv},
|
| 98 |
+
primaryClass={cs.LG}
|
| 99 |
+
}
|
| 100 |
+
```","{""id"": ""time-series-foundation-models/Lag-Llama"", ""author"": ""time-series-foundation-models"", ""sha"": ""72dcfc29da106acfe38250a60f4ae29d1e56a3d9"", ""last_modified"": ""2024-05-14 12:41:49+00:00"", ""created_at"": ""2024-02-07 10:33:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 229, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""time series"", ""forecasting"", ""pretrained models"", ""foundation models"", ""time series foundation models"", ""time-series"", ""time-series-forecasting"", ""arxiv:2310.08278"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""time-series-forecasting"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\npipeline_tag: time-series-forecasting\ntags:\n- time series\n- forecasting\n- pretrained models\n- foundation models\n- time series foundation models\n- time-series"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/lagllama.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lag-llama.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 2449299}, ""total"": 2449299}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-14 12:41:49+00:00"", ""cardData"": ""license: apache-2.0\npipeline_tag: time-series-forecasting\ntags:\n- time series\n- forecasting\n- pretrained models\n- foundation models\n- time series foundation models\n- time-series"", ""transformersInfo"": null, ""_id"": ""65c35c9466c09b58a3834409"", ""modelId"": ""time-series-foundation-models/Lag-Llama"", ""usedStorage"": 39294567}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=time-series-foundation-models/Lag-Llama&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btime-series-foundation-models%2FLag-Llama%5D(%2Ftime-series-foundation-models%2FLag-Llama)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Leffa_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv
ADDED
|
@@ -0,0 +1,81 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
franciszzj/Leffa,"---
|
| 3 |
+
license: mit
|
| 4 |
+
pipeline_tag: image-to-image
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
# *Leffa*: Learning Flow Fields in Attention for Controllable Person Image Generation
|
| 8 |
+
|
| 9 |
+
[📚 Paper](https://arxiv.org/abs/2412.08486) - [🤖 Code](https://github.com/franciszzj/Leffa) - [🔥 Demo](https://huggingface.co/spaces/franciszzj/Leffa) - [🤗 Model](https://huggingface.co/franciszzj/Leffa)
|
| 10 |
+
|
| 11 |
+
Star ⭐ us if you like it!
|
| 12 |
+
|
| 13 |
+
## News
|
| 14 |
+
- 09/Jan/2025. Inference defaults to float16, generating an image in 6 seconds (on A100).
|
| 15 |
+
- 02/Jan/2025. Update the mask generator to improve results. Add ref unet acceleration, boosting prediction speed by 30%. Include more controls in Advanced Options to enhance user experience. Enable intermediate result output for easier development. Enjoy using it!
|
| 16 |
+
- 18/Dec/2024. Thanks to @[StartHua](https://github.com/StartHua) for integrating Leffa into ComfyUI! Here is the [repo](https://github.com/StartHua/Comfyui_leffa)!
|
| 17 |
+
- 16/Dec/2024. The virtual try-on [model](https://huggingface.co/franciszzj/Leffa/blob/main/virtual_tryon_dc.pth) trained on DressCode is released.
|
| 18 |
+
- 12/Dec/2024. The HuggingFace [demo](https://huggingface.co/spaces/franciszzj/Leffa) and [models](https://huggingface.co/franciszzj/Leffa) (virtual try-on model trained on VITON-HD and pose transfer model trained on DeepFashion) are released.
|
| 19 |
+
- 11/Dec/2024. The [arXiv](https://arxiv.org/abs/2412.08486) version of the paper is released.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
*[Leffa](https://en.wiktionary.org/wiki/leffa)* is a unified framework for controllable person image generation that enables precise manipulation of both appearance (i.e., virtual try-on) and pose (i.e., pose transfer).
|
| 23 |
+
|
| 24 |
+
<div align=""center"">
|
| 25 |
+
<img src=""https://huggingface.co/franciszzj/Leffa/resolve/main/assets/teaser.png"" width=""100%"" height=""100%""/>
|
| 26 |
+
</div>
|
| 27 |
+
|
| 28 |
+
## Abstract
|
| 29 |
+
Controllable person image generation aims to generate a person image conditioned on reference images, allowing precise control over the person’s appearance or pose. However, prior methods often distort fine-grained textural details from the reference image, despite achieving high overall image quality. We attribute these distortions to inadequate attention to corresponding regions in the reference image. To address this, we thereby propose **le**arning **f**low **f**ields in **a**ttention (***Leffa***), which explicitly guides the target query to attend to the correct reference key in the attention layer during training. Specifically, it is realized via a regularization loss on top of the attention map within a diffusion-based baseline. Our extensive experiments show that *Leffa* achieves state-of-the-art performance in controlling appearance (virtual try-on) and pose (pose transfer), significantly reducing fine-grained detail distortion while maintaining high image quality. Additionally, we show that our loss is model-agnostic and can be used to improve the performance of other diffusion models.
|
| 30 |
+
|
| 31 |
+
## Method
|
| 32 |
+
An overview of our *Leffa* training pipeline for controllable person image generation. The left is our diffusion-based baseline; the right is our *Leffa* loss. Note that I_src and I_tgt are the same image during training.
|
| 33 |
+
|
| 34 |
+
<div align=""center"">
|
| 35 |
+
<img src=""https://huggingface.co/franciszzj/Leffa/resolve/main/assets/leffa.png"" width=""100%"" height=""100%""/>
|
| 36 |
+
</div>
|
| 37 |
+
|
| 38 |
+
## Visualization
|
| 39 |
+
Qualitative visual results comparison with other methods. The input person image for the pose transfer is generated using our method in the virtual try-on. The visualization results demonstrate that our method not only generates high-quality images but also greatly reduces the distortion of fine-grained details.
|
| 40 |
+
|
| 41 |
+
<div align=""center"">
|
| 42 |
+
<img src=""https://huggingface.co/franciszzj/Leffa/resolve/main/assets/vis_result.png"" width=""100%"" height=""100%""/>
|
| 43 |
+
</div>
|
| 44 |
+
|
| 45 |
+
## Installation
|
| 46 |
+
Create a conda environment and install requirements:
|
| 47 |
+
```shell
|
| 48 |
+
conda create -n leffa python==3.10
|
| 49 |
+
conda activate leffa
|
| 50 |
+
cd Leffa
|
| 51 |
+
pip install -r requirements.txt
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
## Gradio App
|
| 55 |
+
Run locally:
|
| 56 |
+
```shell
|
| 57 |
+
python app.py
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
## Evaluation
|
| 61 |
+
We use this [code](https://github.com/franciszzj/VtonEval) for metric evaluation.
|
| 62 |
+
|
| 63 |
+
## Acknowledgement
|
| 64 |
+
Our code is based on [Diffusers](https://github.com/huggingface/diffusers) and [Transformers](https://github.com/huggingface/transformers).
|
| 65 |
+
We use [SCHP](https://github.com/GoGoDuck912/Self-Correction-Human-Parsing/tree/master) and [DensePose](https://github.com/facebookresearch/DensePose) to generate masks and densepose in our [Demo](https://huggingface.co/spaces/franciszzj/Leffa).
|
| 66 |
+
We also referred to the code of [IDM-VTON](https://github.com/yisol/IDM-VTON) and [CatVTON](https://github.com/Zheng-Chong/CatVTON).
|
| 67 |
+
|
| 68 |
+
## Citation
|
| 69 |
+
If you find our work helpful or inspiring, please feel free to cite it.
|
| 70 |
+
```
|
| 71 |
+
@article{zhou2024learning,
|
| 72 |
+
title={Learning Flow Fields in Attention for Controllable Person Image Generation},
|
| 73 |
+
author={Zhou, Zijian and Liu, Shikun and Han, Xiao and Liu, Haozhe and Ng, Kam Woh and Xie, Tian and Cong, Yuren and Li, Hang and Xu, Mengmeng and Pérez-Rúa, Juan-Manuel and Patel, Aditya and Xiang, Tao and Shi, Miaojing and He, Sen},
|
| 74 |
+
journal={arXiv preprint arXiv:2412.08486},
|
| 75 |
+
year={2024},
|
| 76 |
+
}
|
| 77 |
+
```","{""id"": ""franciszzj/Leffa"", ""author"": ""franciszzj"", ""sha"": ""e90e94fc85e4cc8efa35b40cc1b502451a42a583"", ""last_modified"": ""2025-01-09 16:05:50+00:00"", ""created_at"": ""2024-12-10 17:48:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 309, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""onnx"", ""image-to-image"", ""arxiv:2412.08486"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: mit\npipeline_tag: image-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/leffa.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/teaser.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/vis_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='densepose/Base-DensePose-RCNN-FPN.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='densepose/densepose_rcnn_R_50_FPN_s1x.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='densepose/model_final_162be9.pkl', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/garment/01449_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/garment/01486_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/garment/01853_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/garment/02070_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/garment/03553_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person1/01350_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person1/01376_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person1/01416_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person1/05976_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person1/06094_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person2/01850_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person2/01875_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person2/02532_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person2/02902_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='examples/person2/05346_00.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='humanparsing/parsing_atr.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='humanparsing/parsing_lip.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openpose/body_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pose_transfer.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='schp/exp-schp-201908261155-lip.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='schp/exp-schp-201908301523-atr.pth', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='stable-diffusion-inpainting/scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stable-diffusion-inpainting/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stable-diffusion-inpainting/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stable-diffusion-xl-1.0-inpainting-0.1/scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stable-diffusion-xl-1.0-inpainting-0.1/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stable-diffusion-xl-1.0-inpainting-0.1/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='virtual_tryon.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='virtual_tryon_dc.pth', size=None, blob_id=None, lfs=None)""], ""spaces"": [""franciszzj/Leffa"", ""ginipick/FitGen"", ""Nymbo/Leffa"", ""yidasanqian/Leffa"", ""zyflzxy/Leffa"", ""yusufsinan/yapayzeka"", ""sumeetemail27/clothnew"", ""jeremymiku2/Leffa"", ""vimalvskl7/Leffa"", ""ombrajoe/DualMe""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-09 16:05:50+00:00"", ""cardData"": ""license: mit\npipeline_tag: image-to-image"", ""transformersInfo"": null, ""_id"": ""67587ee70df2bc4dd6c8034a"", ""modelId"": ""franciszzj/Leffa"", ""usedStorage"": 36843788677}",0,https://huggingface.co/elric8475/leffa,1,,0,,0,,0,"Nymbo/Leffa, franciszzj/Leffa, ginipick/FitGen, huggingface/InferenceSupport/discussions/new?title=franciszzj/Leffa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfranciszzj%2FLeffa%5D(%2Ffranciszzj%2FLeffa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jeremymiku2/Leffa, ombrajoe/DualMe, sumeetemail27/clothnew, vimalvskl7/Leffa, yidasanqian/Leffa, yusufsinan/yapayzeka, zyflzxy/Leffa",11
|
| 78 |
+
elric8475/leffa,"---
|
| 79 |
+
base_model:
|
| 80 |
+
- franciszzj/Leffa
|
| 81 |
+
---","{""id"": ""elric8475/leffa"", ""author"": ""elric8475"", ""sha"": ""6f3ffa7605a987d96eee5701183081a61e43773a"", ""last_modified"": ""2024-12-30 02:17:58+00:00"", ""created_at"": ""2024-12-30 02:15:45+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""base_model:franciszzj/Leffa"", ""base_model:finetune:franciszzj/Leffa"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- franciszzj/Leffa"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-30 02:17:58+00:00"", ""cardData"": ""base_model:\n- franciszzj/Leffa"", ""transformersInfo"": null, ""_id"": ""6772025191b36f3bbfb27da2"", ""modelId"": ""elric8475/leffa"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=elric8475/leffa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Belric8475%2Fleffa%5D(%2Felric8475%2Fleffa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Llama-2-7B-GGUF_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv
ADDED
|
@@ -0,0 +1,406 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TheBloke/Llama-2-7B-GGUF,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: llama2
|
| 6 |
+
tags:
|
| 7 |
+
- facebook
|
| 8 |
+
- meta
|
| 9 |
+
- pytorch
|
| 10 |
+
- llama
|
| 11 |
+
- llama-2
|
| 12 |
+
model_name: Llama 2 7B
|
| 13 |
+
base_model: meta-llama/Llama-2-7b-hf
|
| 14 |
+
inference: false
|
| 15 |
+
model_creator: Meta
|
| 16 |
+
model_type: llama
|
| 17 |
+
pipeline_tag: text-generation
|
| 18 |
+
prompt_template: '{prompt}
|
| 19 |
+
|
| 20 |
+
'
|
| 21 |
+
quantized_by: TheBloke
|
| 22 |
+
---
|
| 23 |
+
|
| 24 |
+
<!-- header start -->
|
| 25 |
+
<!-- 200823 -->
|
| 26 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 27 |
+
<img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 28 |
+
</div>
|
| 29 |
+
<div style=""display: flex; justify-content: space-between; width: 100%;"">
|
| 30 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-start;"">
|
| 31 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
|
| 32 |
+
</div>
|
| 33 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-end;"">
|
| 34 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
|
| 35 |
+
</div>
|
| 36 |
+
</div>
|
| 37 |
+
<div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
|
| 38 |
+
<hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
|
| 39 |
+
<!-- header end -->
|
| 40 |
+
|
| 41 |
+
# Llama 2 7B - GGUF
|
| 42 |
+
- Model creator: [Meta](https://huggingface.co/meta-llama)
|
| 43 |
+
- Original model: [Llama 2 7B](https://huggingface.co/meta-llama/Llama-2-7b-hf)
|
| 44 |
+
|
| 45 |
+
<!-- description start -->
|
| 46 |
+
## Description
|
| 47 |
+
|
| 48 |
+
This repo contains GGUF format model files for [Meta's Llama 2 7B](https://huggingface.co/meta-llama/Llama-2-7b-hf).
|
| 49 |
+
|
| 50 |
+
<!-- description end -->
|
| 51 |
+
<!-- README_GGUF.md-about-gguf start -->
|
| 52 |
+
### About GGUF
|
| 53 |
+
|
| 54 |
+
GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. GGUF offers numerous advantages over GGML, such as better tokenisation and support for special tokens. It also supports metadata, and is designed to be extensible.
|
| 55 |
+
|
| 56 |
+
Here is an incomplete list of clients and libraries that are known to support GGUF:
|
| 57 |
+
|
| 58 |
+
* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
|
| 59 |
+
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
|
| 60 |
+
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling.
|
| 61 |
+
* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.
|
| 62 |
+
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
|
| 63 |
+
* [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
|
| 64 |
+
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 65 |
+
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 66 |
+
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.
|
| 67 |
+
|
| 68 |
+
<!-- README_GGUF.md-about-gguf end -->
|
| 69 |
+
<!-- repositories-available start -->
|
| 70 |
+
## Repositories available
|
| 71 |
+
|
| 72 |
+
* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Llama-2-7B-AWQ)
|
| 73 |
+
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-7B-GPTQ)
|
| 74 |
+
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-7B-GGUF)
|
| 75 |
+
* [Meta's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-7b-hf)
|
| 76 |
+
<!-- repositories-available end -->
|
| 77 |
+
|
| 78 |
+
<!-- prompt-template start -->
|
| 79 |
+
## Prompt template: None
|
| 80 |
+
|
| 81 |
+
```
|
| 82 |
+
{prompt}
|
| 83 |
+
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
<!-- prompt-template end -->
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
<!-- compatibility_gguf start -->
|
| 90 |
+
## Compatibility
|
| 91 |
+
|
| 92 |
+
These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)
|
| 93 |
+
|
| 94 |
+
They are also compatible with many third party UIs and libraries - please see the list at the top of this README.
|
| 95 |
+
|
| 96 |
+
## Explanation of quantisation methods
|
| 97 |
+
<details>
|
| 98 |
+
<summary>Click to see details</summary>
|
| 99 |
+
|
| 100 |
+
The new methods available are:
|
| 101 |
+
* GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw).
|
| 102 |
+
* GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
|
| 103 |
+
* GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
|
| 104 |
+
* GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K, resulting in 5.5 bpw.
|
| 105 |
+
* GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw
|
| 106 |
+
|
| 107 |
+
Refer to the Provided Files table below to see what files use which methods, and how.
|
| 108 |
+
</details>
|
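As a rough sanity check of the bits-per-weight figures above, the on-disk file size can be estimated from the parameter count. A minimal sketch (the 6.74B parameter count for Llama 2 7B is taken from the repo's GGUF metadata; the estimate ignores per-file metadata and the fact that _M/_S variants mix tensor types, so treat it as approximate):

```python
# Rough GGUF file-size estimate from bits-per-weight (bpw).
# Assumes ~6.74e9 parameters for Llama 2 7B; real files also carry
# metadata and mixed tensor types, so this is only an approximation.
def estimate_size_gb(n_params: float, bpw: float) -> float:
    return n_params * bpw / 8 / 1e9  # bits -> bytes -> GB (decimal)

for name, bpw in [('Q2_K', 2.5625), ('Q4_K', 4.5), ('Q6_K', 6.5625)]:
    print(name, round(estimate_size_gb(6.74e9, bpw), 2), 'GB')
```

For example, the Q6_K estimate (~5.53 GB) lines up with the Q6_K entry in the Provided Files table below.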
| 109 |
+
<!-- compatibility_gguf end -->
|
| 110 |
+
|
| 111 |
+
<!-- README_GGUF.md-provided-files start -->
|
| 112 |
+
## Provided files
|
| 113 |
+
|
| 114 |
+
| Name | Quant method | Bits | Size | Max RAM required | Use case |
|
| 115 |
+
| ---- | ---- | ---- | ---- | ---- | ----- |
|
| 116 |
+
| [llama-2-7b.Q2_K.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q2_K.gguf) | Q2_K | 2 | 2.83 GB| 5.33 GB | smallest, significant quality loss - not recommended for most purposes |
|
| 117 |
+
| [llama-2-7b.Q3_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q3_K_S.gguf) | Q3_K_S | 3 | 2.95 GB| 5.45 GB | very small, high quality loss |
|
| 118 |
+
| [llama-2-7b.Q3_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q3_K_M.gguf) | Q3_K_M | 3 | 3.30 GB| 5.80 GB | very small, high quality loss |
|
| 119 |
+
| [llama-2-7b.Q3_K_L.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q3_K_L.gguf) | Q3_K_L | 3 | 3.60 GB| 6.10 GB | small, substantial quality loss |
|
| 120 |
+
| [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) | Q4_0 | 4 | 3.83 GB| 6.33 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
|
| 121 |
+
| [llama-2-7b.Q4_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_K_S.gguf) | Q4_K_S | 4 | 3.86 GB| 6.36 GB | small, greater quality loss |
|
| 122 |
+
| [llama-2-7b.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_K_M.gguf) | Q4_K_M | 4 | 4.08 GB| 6.58 GB | medium, balanced quality - recommended |
|
| 123 |
+
| [llama-2-7b.Q5_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q5_0.gguf) | Q5_0 | 5 | 4.65 GB| 7.15 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
|
| 124 |
+
| [llama-2-7b.Q5_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q5_K_S.gguf) | Q5_K_S | 5 | 4.65 GB| 7.15 GB | large, low quality loss - recommended |
|
| 125 |
+
| [llama-2-7b.Q5_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q5_K_M.gguf) | Q5_K_M | 5 | 4.78 GB| 7.28 GB | large, very low quality loss - recommended |
|
| 126 |
+
| [llama-2-7b.Q6_K.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q6_K.gguf) | Q6_K | 6 | 5.53 GB| 8.03 GB | very large, extremely low quality loss |
|
| 127 |
+
| [llama-2-7b.Q8_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q8_0.gguf) | Q8_0 | 8 | 7.16 GB| 9.66 GB | very large, extremely low quality loss - not recommended |
|
| 128 |
+
|
| 129 |
+
**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
<!-- README_GGUF.md-provided-files end -->
|
| 134 |
+
|
| 135 |
+
<!-- README_GGUF.md-how-to-download start -->
|
| 136 |
+
## How to download GGUF files
|
| 137 |
+
|
| 138 |
+
**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.
|
| 139 |
+
|
| 140 |
+
The following clients/libraries will automatically download models for you, providing a list of available models to choose from:
|
| 141 |
+
- LM Studio
|
| 142 |
+
- LoLLMS Web UI
|
| 143 |
+
- Faraday.dev
|
| 144 |
+
|
| 145 |
+
### In `text-generation-webui`
|
| 146 |
+
|
| 147 |
+
Under Download Model, you can enter the model repo: TheBloke/Llama-2-7B-GGUF and below it, a specific filename to download, such as: llama-2-7b.Q4_K_M.gguf.
|
| 148 |
+
|
| 149 |
+
Then click Download.
|
| 150 |
+
|
| 151 |
+
### On the command line, including multiple files at once
|
| 152 |
+
|
| 153 |
+
I recommend using the `huggingface-hub` Python library:
|
| 154 |
+
|
| 155 |
+
```shell
|
| 156 |
+
pip3 install huggingface-hub>=0.17.1
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
Then you can download any individual model file to the current directory, at high speed, with a command like this:
|
| 160 |
+
|
| 161 |
+
```shell
|
| 162 |
+
huggingface-cli download TheBloke/Llama-2-7B-GGUF llama-2-7b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
|
| 163 |
+
```
|
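If you prefer to script the download in Python rather than use the CLI, a minimal sketch with the same library's `hf_hub_download` function (same repo and filename as the command above) could look like this:

```python
from huggingface_hub import hf_hub_download

# Download a single GGUF file from the repo into the current directory.
path = hf_hub_download(
    repo_id='TheBloke/Llama-2-7B-GGUF',
    filename='llama-2-7b.Q4_K_M.gguf',
    local_dir='.',
)
print(path)  # local path to the downloaded file
```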
| 164 |
+
|
| 165 |
+
<details>
|
| 166 |
+
<summary>More advanced huggingface-cli download usage</summary>
|
| 167 |
+
|
| 168 |
+
You can also download multiple files at once with a pattern:
|
| 169 |
+
|
| 170 |
+
```shell
|
| 171 |
+
huggingface-cli download TheBloke/Llama-2-7B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
|
| 172 |
+
```
|
| 173 |
+
|
| 174 |
+
For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).
|
| 175 |
+
|
| 176 |
+
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:
|
| 177 |
+
|
| 178 |
+
```shell
|
| 179 |
+
pip3 install hf_transfer
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:
|
| 183 |
+
|
| 184 |
+
```shell
|
| 185 |
+
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Llama-2-7B-GGUF llama-2-7b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
Windows CLI users: Use `set HF_HUB_ENABLE_HF_TRANSFER=1` before running the download command.
|
| 189 |
+
</details>
|
| 190 |
+
<!-- README_GGUF.md-how-to-download end -->
|
| 191 |
+
|
| 192 |
+
<!-- README_GGUF.md-how-to-run start -->
|
| 193 |
+
## Example `llama.cpp` command
|
| 194 |
+
|
| 195 |
+
Make sure you are using `llama.cpp` from commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.
|
| 196 |
+
|
| 197 |
+
```shell
|
| 198 |
+
./main -ngl 32 -m llama-2-7b.Q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""{prompt}""
|
| 199 |
+
```
|
| 200 |
+
|
| 201 |
+
Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
|
| 202 |
+
|
| 203 |
+
Change `-c 4096` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically.
|
| 204 |
+
|
| 205 |
+
If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
|
| 206 |
+
|
| 207 |
+
For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
|
| 208 |
+
|
| 209 |
+
## How to run in `text-generation-webui`
|
| 210 |
+
|
| 211 |
+
Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).
|
| 212 |
+
|
| 213 |
+
## How to run from Python code
|
| 214 |
+
|
| 215 |
+
You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.
|
| 216 |
+
|
| 217 |
+
### How to load this model from Python using ctransformers
|
| 218 |
+
|
| 219 |
+
#### First install the package
|
| 220 |
+
|
| 221 |
+
```bash
|
| 222 |
+
# Base ctransformers with no GPU acceleration
|
| 223 |
+
pip install 'ctransformers>=0.2.24'
|
| 224 |
+
# Or with CUDA GPU acceleration
|
| 225 |
+
pip install 'ctransformers[cuda]>=0.2.24'
|
| 226 |
+
# Or with ROCm GPU acceleration
|
| 227 |
+
CT_HIPBLAS=1 pip install 'ctransformers>=0.2.24' --no-binary ctransformers
|
| 228 |
+
# Or with Metal GPU acceleration for macOS systems
|
| 229 |
+
CT_METAL=1 pip install 'ctransformers>=0.2.24' --no-binary ctransformers
|
| 230 |
+
```
|
| 231 |
+
|
| 232 |
+
#### Simple example code to load one of these GGUF models
|
| 233 |
+
|
| 234 |
+
```python
|
| 235 |
+
from ctransformers import AutoModelForCausalLM
|
| 236 |
+
|
| 237 |
+
# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
|
| 238 |
+
llm = AutoModelForCausalLM.from_pretrained(""TheBloke/Llama-2-7B-GGUF"", model_file=""llama-2-7b.Q4_K_M.gguf"", model_type=""llama"", gpu_layers=50)
|
| 239 |
+
|
| 240 |
+
print(llm(""AI is going to""))
|
| 241 |
+
```
|
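For comparison, the llama-cpp-python route mentioned above is very similar. A minimal sketch (assumes the Q4_K_M file has already been downloaded locally, and that llama-cpp-python was built with GPU support if you set `n_gpu_layers` above 0):

```python
from llama_cpp import Llama

# Load a local GGUF file; n_gpu_layers=0 runs entirely on CPU.
llm = Llama(model_path='llama-2-7b.Q4_K_M.gguf', n_ctx=4096, n_gpu_layers=32)

# Plain text completion, mirroring the ctransformers example above.
output = llm('AI is going to', max_tokens=64)
print(output['choices'][0]['text'])
```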
| 242 |
+
|
| 243 |
+
## How to use with LangChain
|
| 244 |
+
|
| 245 |
+
Here are guides on using llama-cpp-python and ctransformers with LangChain:
|
| 246 |
+
|
| 247 |
+
* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
|
| 248 |
+
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)
|
| 249 |
+
|
| 250 |
+
<!-- README_GGUF.md-how-to-run end -->
|
| 251 |
+
|
| 252 |
+
<!-- footer start -->
|
| 253 |
+
<!-- 200823 -->
|
| 254 |
+
## Discord
|
| 255 |
+
|
| 256 |
+
For further support, and discussions on these models and AI in general, join us at:
|
| 257 |
+
|
| 258 |
+
[TheBloke AI's Discord server](https://discord.gg/theblokeai)
|
| 259 |
+
|
| 260 |
+
## Thanks, and how to contribute
|
| 261 |
+
|
| 262 |
+
Thanks to the [chirper.ai](https://chirper.ai) team!
|
| 263 |
+
|
| 264 |
+
Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
|
| 265 |
+
|
| 266 |
+
I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
|
| 267 |
+
|
| 268 |
+
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
|
| 269 |
+
|
| 270 |
+
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
|
| 271 |
+
|
| 272 |
+
* Patreon: https://patreon.com/TheBlokeAI
|
| 273 |
+
* Ko-Fi: https://ko-fi.com/TheBlokeAI
|
| 274 |
+
|
| 275 |
+
**Special thanks to**: Aemon Algiz.
|
| 276 |
+
|
| 277 |
+
**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
Thank you to all my generous patrons and donaters!
|
| 281 |
+
|
| 282 |
+
And thank you again to a16z for their generous grant.
|
| 283 |
+
|
| 284 |
+
<!-- footer end -->
|
| 285 |
+
|
| 286 |
+
<!-- original-model-card start -->
|
| 287 |
+
# Original model card: Meta's Llama 2 7B
|
| 288 |
+
|
| 289 |
+
# **Llama 2**
|
| 290 |
+
Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
|
| 291 |
+
|
| 292 |
+
## Model Details
|
| 293 |
+
*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
|
| 294 |
+
|
| 295 |
+
Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.
|
| 296 |
+
|
| 297 |
+
**Model Developers** Meta
|
| 298 |
+
|
| 299 |
+
**Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.
|
| 300 |
+
|
| 301 |
+
**Input** Models input text only.
|
| 302 |
+
|
| 303 |
+
**Output** Models generate text only.
|
| 304 |
+
|
| 305 |
+
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
||Training Data|Params|Content Length|GQA|Tokens|LR|
|
| 309 |
+
|---|---|---|---|---|---|---|
|
| 310 |
+
|Llama 2|*A new mix of publicly available online data*|7B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 311 |
+
|Llama 2|*A new mix of publicly available online data*|13B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|
| 312 |
+
|Llama 2|*A new mix of publicly available online data*|70B|4k|✔|2.0T|1.5 x 10<sup>-4</sup>|
|
| 313 |
+
|
| 314 |
+
*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models (70B) use Grouped-Query Attention (GQA) for improved inference scalability.
|
| 315 |
+
|
| 316 |
+
**Model Dates** Llama 2 was trained between January 2023 and July 2023.
|
| 317 |
+
|
| 318 |
+
**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
|
| 319 |
+
|
| 320 |
+
**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
|
| 321 |
+
|
| 322 |
+
**Research Paper** [""Llama-2: Open Foundation and Fine-tuned Chat Models""](https://arxiv.org/abs/2307.09288)
|
| 323 |
+
|
| 324 |
+
## Intended Use
|
| 325 |
+
**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
|
| 326 |
+
|
| 327 |
+
To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).
|
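For illustration, a minimal hand-rolled sketch of that chat formatting for a single-turn exchange (the system and user strings below are placeholders; the reference `chat_completion` code linked above remains the authoritative implementation):

```python
# Hand-rolled Llama-2-Chat prompt for one user turn, using the INST and <<SYS>> tags.
# strip() is applied to inputs, as recommended above, to avoid double spaces.
system = 'You are a helpful assistant.'      # placeholder system prompt
user = 'What is the capital of France?'      # placeholder user message

prompt = (
    '<s>[INST] <<SYS>>\n'
    f'{system.strip()}\n'
    '<</SYS>>\n\n'
    f'{user.strip()} [/INST]'
)
print(prompt)
```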
| 328 |
+
|
| 329 |
+
**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.
|
| 330 |
+
|
| 331 |
+
## Hardware and Software
|
| 332 |
+
**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
|
| 333 |
+
|
| 334 |
+
**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.
|
| 335 |
+
|
| 336 |
+
||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)|
|
| 337 |
+
|---|---|---|---|
|
| 338 |
+
|Llama 2 7B|184320|400|31.22|
|
| 339 |
+
|Llama 2 13B|368640|400|62.44|
|
| 340 |
+
|Llama 2 70B|1720320|400|291.42|
|
| 341 |
+
|Total|3311616||539.00|
|
| 342 |
+
|
| 343 |
+
**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
|
| 344 |
+
|
| 345 |
+
## Training Data
|
| 346 |
+
**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
|
| 347 |
+
|
| 348 |
+
**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.
|
| 349 |
+
|
| 350 |
+
## Evaluation Results
|
| 351 |
+
|
| 352 |
+
In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
|
| 353 |
+
|
| 354 |
+
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|
| 355 |
+
|---|---|---|---|---|---|---|---|---|---|
|
| 356 |
+
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|
| 357 |
+
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|
| 358 |
+
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|
| 359 |
+
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|
| 360 |
+
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|
| 361 |
+
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|
| 362 |
+
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
|
| 363 |
+
|
| 364 |
+
**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
|
| 365 |
+
|
| 366 |
+
|||TruthfulQA|Toxigen|
|
| 367 |
+
|---|---|---|---|
|
| 368 |
+
|Llama 1|7B|27.42|23.00|
|
| 369 |
+
|Llama 1|13B|41.74|23.08|
|
| 370 |
+
|Llama 1|33B|44.19|22.57|
|
| 371 |
+
|Llama 1|65B|48.71|21.77|
|
| 372 |
+
|Llama 2|7B|33.29|**21.25**|
|
| 373 |
+
|Llama 2|13B|41.86|26.10|
|
| 374 |
+
|Llama 2|70B|**50.18**|24.60|
|
| 375 |
+
|
| 376 |
+
**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
|||TruthfulQA|Toxigen|
|
| 380 |
+
|---|---|---|---|
|
| 381 |
+
|Llama-2-Chat|7B|57.04|**0.00**|
|
| 382 |
+
|Llama-2-Chat|13B|62.18|**0.00**|
|
| 383 |
+
|Llama-2-Chat|70B|**64.14**|0.01|
|
| 384 |
+
|
| 385 |
+
**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.
|
| 386 |
+
|
| 387 |
+
## Ethical Considerations and Limitations
|
| 388 |
+
Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
|
| 389 |
+
|
| 390 |
+
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide/)
|
| 391 |
+
|
| 392 |
+
## Reporting Issues
|
| 393 |
+
Please report any software “bug,” or other problems with the models through one of the following means:
|
| 394 |
+
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
|
| 395 |
+
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
|
| 396 |
+
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
|
| 397 |
+
|
| 398 |
+
## Llama Model Index
|
| 399 |
+
|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|
| 400 |
+
|---|---|---|---|---|
|
| 401 |
+
|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
|
| 402 |
+
|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
|
| 403 |
+
|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
|
| 404 |
+
|
| 405 |
+
<!-- original-model-card end -->
|
| 406 |
+
","{""id"": ""TheBloke/Llama-2-7B-GGUF"", ""author"": ""TheBloke"", ""sha"": ""b4e04e128f421c93a5f1e34ac4d7ca9b0af47b80"", ""last_modified"": ""2023-10-24 07:32:45+00:00"", ""created_at"": ""2023-09-04 15:53:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 19841, ""downloads_all_time"": null, ""likes"": 192, ""library_name"": ""transformers"", ""gguf"": {""total"": 6738415616, ""architecture"": ""llama"", ""context_length"": 4096}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""llama"", ""facebook"", ""meta"", ""pytorch"", ""llama-2"", ""text-generation"", ""en"", ""arxiv:2307.09288"", ""base_model:meta-llama/Llama-2-7b-hf"", ""base_model:quantized:meta-llama/Llama-2-7b-hf"", ""license:llama2"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: meta-llama/Llama-2-7b-hf\nlanguage:\n- en\nlicense: llama2\nmodel_name: Llama 2 7B\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta\nmodel_type: llama\nprompt_template: '{prompt}\n\n '\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q2_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q3_K_L.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q3_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q4_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q4_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q5_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q5_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q5_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-7b.Q8_0.gguf', size=None, blob_id=None, lfs=None)""], ""spaces"": [""JohnSmith9982/ChuanhuChatGPT"", ""MILVLG/IMPChat"", ""JohnSmith9982/ChuanhuChatGPT_Beta"", ""ominous94/ChuanhuChatGPT"", ""FISHYA/ChuanhuChatGPT"", ""markqiu/prinvest_mate"", ""data-catering/document-answering"", ""cryptokael/ChuanhuChatGPT"", ""Kate0816/ChuanhuChatGPT"", ""qiao125/ChuanhuChatGPT"", ""Kate0816/ChuanhuChatGPT1121"", ""m1n9k7/chat-llama2-7b"", ""willdas/ChuanhuChatGPT"", 
""cn208138/ChuanhuChatGPT"", ""pscpeng/ChuanhuChatGPT"", ""chiye/ChuanhuChatGPT"", ""ztYU/ChuanhuChatGPT"", ""mlike/ChuanhuChatGPT"", ""lihuaaa/ChuanhuChatGPT"", ""Amadeus111111/ChuanhuChatGPT"", ""knowfoot/ChuanhuChatGPT"", ""Ho2/ChuanhuChatGPT"", ""DaY1zz/ChuanhuChatGPT"", ""nexzhu/ChuanhuChatGPT"", ""KKK33697/ChuanhuChatGPT"", ""divilis/newchatgpt"", ""zizhongfeiyang/zizhongfeiyang"", ""Meowoo/ChuanhuChatGPT"", ""BIONICT/ChuanhuChatGPT"", ""Yi3852/ChuanhuChatGPT"", ""DaliAlmost/ChatGPT"", ""Zeming007/Zemingtalk"", ""vvvvvvv0/LLM"", ""vvvvvvv0/11111"", ""thoumayest/goodman"", ""syjs10/ChuanhuChatGPT"", ""guoneng/ChuanhuChatGPT"", ""annerose10/ChuanhuChatGPT"", ""Inhome/ChuanhuChatGPT"", ""zhouhui/ChuanhuChatGPT"", ""kuxian/ChuanhuChatGPT"", ""feile123/ChuanhuChatGPT"", ""Keldos/ChuanhuChatGPT4"", ""guocai/ChuanhuChatGPT"", ""afen671/ChuanhuChatGPT"", ""sixeco/ai11"", ""hui2023099/ChuanhuChatGPT"", ""wongthomas/ChuanhuChatGPT"", ""Links-q/GPT"", ""Priyanka-Balivada/SumLLAMA"", ""framlc/DemoGPT"", ""virtualina/test1"", ""sdbhud1b/AI_Beta"", ""BhanuPrakashSamoju/base_models_rag"", ""Christer333/ChuanhuChatGPT"", ""a193496183/ChuanhuChatGPT"", ""csdawq/gptweb"", ""Lynxu/ChuanhuChatGPT"", ""linkailin/Kindergarten-ChatGPT"", ""leonsimon23/xchuanhu"", ""wxthss82/XinYiChatGPT"", ""eggacheb/ChuanhuChatGPT"", ""eggacheb/chuangpt"", ""yzxzc/ChuanhuChatGPT"", ""jchen8000/text-webui-CPUBasic"", ""toppics241/ChuanhuChatGPT"", ""alicezhai/ChuanHuChat_DemoGPT"", ""Liangs/LLMChat"", ""Felix5572/ChuanhuChatGPT"", ""lwz114/gptweb"", ""Cran-May/ChuanhuChatGPT"", ""NLPark/NLPark"", ""Boron666/ChuanhuChatGPT2"", ""vishnu714/LearnSphere_Gradio"", ""jasonhe91/ChuanhuChatGPT"", ""dev333/DevGPT"", ""songdaooi/ChuanhuChatGPT"", ""Yousefsalem/Medical-Chatbot"", ""YuoHira/ChuanhuChatGPT"", ""XiaoXingXing226/BigData_FinCom"", ""SCGY/ChuanhuChatGPT"", ""Arrcttacsrks/Llama-3.2-3B-InstructX"", ""concert/ChuanhuChatGPT"", ""Litoy5000/ChuanhuChatGPT"", ""simhaonline/AI""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-24 07:32:45+00:00"", ""cardData"": ""base_model: meta-llama/Llama-2-7b-hf\nlanguage:\n- en\nlicense: llama2\nmodel_name: Llama 2 7B\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta\nmodel_type: llama\nprompt_template: '{prompt}\n\n '\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""64f5fd954d3b1dd311d30e28"", ""modelId"": ""TheBloke/Llama-2-7B-GGUF"", ""usedStorage"": 51209736448}",0,,0,,0,,0,,0,"Amadeus111111/ChuanhuChatGPT, FISHYA/ChuanhuChatGPT, JohnSmith9982/ChuanhuChatGPT, JohnSmith9982/ChuanhuChatGPT_Beta, Kate0816/ChuanhuChatGPT, MILVLG/IMPChat, cryptokael/ChuanhuChatGPT, data-catering/document-answering, eggacheb/ChuanhuChatGPT, eggacheb/chuangpt, huggingface/InferenceSupport/discussions/new?title=TheBloke/Llama-2-7B-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FLlama-2-7B-GGUF%5D(%2FTheBloke%2FLlama-2-7B-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, markqiu/prinvest_mate, ominous94/ChuanhuChatGPT",13
|
Llama-3-8B-Instruct-Gradient-1048k_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
@@ -0,0 +1,161 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
gradientai/Llama-3-8B-Instruct-Gradient-1048k,N/A,N/A,0,"https://huggingface.co/WeMake/Llama-3-8B-Instruct-V41-1048k, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule",3,https://huggingface.co/RamyaRamakrishna/llama3-adapters-1,1,"https://huggingface.co/QuantFactory/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/qwp4w3hyb/Llama-3-8B-Instruct-Gradient-1048k-iMat-GGUF, https://huggingface.co/Slvcxc/Llama-3-8B-Instruct-Gradient-1048k-8.0bpw-h8-exl2, https://huggingface.co/second-state/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/PrunaAI/gradientai-Llama-3-8B-Instruct-Gradient-1048k-AWQ-4bit-smashed, https://huggingface.co/solidrust/Llama-3-8B-Instruct-Gradient-1048k-AWQ, https://huggingface.co/QuantFactory/Llama-3-8B-Instruct-Gradient-1048k-GGUF-v2, https://huggingface.co/chienweichang/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/jpodivin/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/sygenaithanos/Llama-3-8B-Instruct-Gradient-1048k-Q4_0-GGUF, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule-q4-k-m-GGUF, https://huggingface.co/Sc0m3r/Llama-3-8B-Instruct-Gradient-1048k-Q4_K_M-GGUF, https://huggingface.co/zhentaoyu/Llama-3-8B-Instruct-Gradient-1048k-Q4_0-GGUF, https://huggingface.co/llmware/gradientai-llama3-8b-1048k-ov, https://huggingface.co/featherless-ai-quants/gradientai-Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/mradermacher/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/mradermacher/Llama-3-8B-Instruct-Gradient-1048k-i1-GGUF, https://huggingface.co/tensorblock/Llama-3-8B-Instruct-Gradient-1048k-GGUF",18,"https://huggingface.co/kromeurus/L3.1-Siithamo-v0.4-8B, https://huggingface.co/EldritchHorror/HodgePodge, https://huggingface.co/EldritchHorror/EldritchHorror, https://huggingface.co/Jebadiah/gradient-1m-OpenBio-stone-l3-8b, https://huggingface.co/dustydecapod/mergekit-linear-hdgrztx, https://huggingface.co/Fischerboot/SmallBoi, https://huggingface.co/Fischerboot/BigBoiV14, https://huggingface.co/lighteternal/Llama-3-8B-Instruct-MergeSLERP-Gradient1048k-OpenBioLLM, https://huggingface.co/td5038/Llama3-8B-Uncensored-1048k, https://huggingface.co/kromvault/L3.1-Siithamo-v0.2-8B, https://huggingface.co/kromvault/L3.1-Siithamo-v0.3-8B, https://huggingface.co/kromvault/L3.1-Ablaze-Vulca-v0.1-8B, https://huggingface.co/powermove72/Llama3-NextGen-9b",13,"ArmanShirzad/gradientai-Llama-3-8B-Instruct-Gradient-1048k, Cyleux/Llama-3-8B-Instruct-Gradient-1048k, Darok/Featherless-Feud, JackHoltone/try-this-model, Oussama2000/test2, SC999/NV_Nemotron, Yoxas/Learn, benhancock/demo, emekaboris/try-this-model, facebook/CyberSecEval, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=gradientai/Llama-3-8B-Instruct-Gradient-1048k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgradientai%2FLlama-3-8B-Instruct-Gradient-1048k%5D(%2Fgradientai%2FLlama-3-8B-Instruct-Gradient-1048k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, microsoft/MInference, yuvaranianandhan24/chat_with_pdf",14
|
| 3 |
+
WeMake/Llama-3-8B-Instruct-V41-1048k,"---
|
| 4 |
+
license: llama3
|
| 5 |
+
language: en
|
| 6 |
+
datasets:
|
| 7 |
+
- WeMake/Intelligent-Content-Understanding
|
| 8 |
+
base_model:
|
| 9 |
+
- gradientai/Llama-3-8B-Instruct-Gradient-1048k
|
| 10 |
+
- meta-llama/Meta-Llama-3-8B
|
| 11 |
+
pipeline_tag: text-generation
|
| 12 |
+
tags:
|
| 13 |
+
- not-for-all-audiences
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+
# WeMake 💙 Llama-3 8B V41 Instruct 1048k
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+
Welcome to the official repository for `Llama-3-8B-Instruct-V41-1048k`, WeMake's pioneering 1 Million Token Large Language Model (LLM). This model represents a significant milestone in the evolution of natural language understanding and generation, combining the robust foundation of Meta's Llama-3 architecture with the nuanced alignment and emotional intelligence of WeMake's V41.
|
| 21 |
+
|
| 22 |
+
## Overview
|
| 23 |
+
|
| 24 |
+
**WeMake/Llama-3-8B-Instruct-V41-1048k** is a state-of-the-art language model designed to understand and generate human-like text with an unprecedented level of emotional intelligence and alignment. This model is a fork of both `gradientai/Llama-3-8B-Instruct-Gradient-1048k` and `meta-llama/Meta-Llama-3-8B`, enhanced with the unique capabilities of WeMake's V41 and trained using the proprietary WeMake ICU method.
|
| 25 |
+
|
| 26 |
+
Our model is engineered to serve a wide array of applications, from advanced conversational agents and content creation tools to sophisticated data analysis and insight generation platforms. It embodies WeMake's commitment to pushing the boundaries of AI to create more empathetic, understanding, and useful technologies.
|
| 27 |
+
|
| 28 |
+
## Key Features
|
| 29 |
+
|
| 30 |
+
- **Emotional Intelligence:** Integrates WeMake's V41 emotional intelligence, enabling the model to understand and generate responses that consider emotional context and nuances.
|
| 31 |
+
- **Alignment with Human Values:** Trained using the WeMake ICU method, ensuring the model's outputs are aligned with ethical standards and human values.
|
| 32 |
+
- **Extensive Knowledge Base:** Leverages a vast dataset, encompassing a wide range of topics, to provide accurate and contextually relevant responses.
|
| 33 |
+
- **Highly Configurable:** Offers extensive customization options to cater to specific application requirements, including adjustable generation settings and fine-tuning capabilities.
|
| 34 |
+
- **Multilingual Support:** Capable of understanding and generating text in multiple languages, making it a versatile tool for global applications.
|
| 35 |
+
|
| 36 |
+
## Model Specifications
|
| 37 |
+
|
| 38 |
+
- **Model Path:** WeMake/Llama-3-8B-Instruct-V41-1048k
|
| 39 |
+
- **Architecture:** LlamaForCausalLM
|
| 40 |
+
- **Hidden Size:** 4096
|
| 41 |
+
- **Number of Attention Heads:** 32
|
| 42 |
+
- **Number of Hidden Layers:** 32
|
| 43 |
+
- **Max Position Embeddings:** 1048576
|
| 44 |
+
- **Vocabulary Size:** 128256
|
| 45 |
+
- **Torch Data Type:** bfloat16
|
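A minimal loading sketch consistent with the specifications above (the repo id is taken from this card; `device_map='auto'` is an assumption for convenience and requires the `accelerate` package):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = 'WeMake/Llama-3-8B-Instruct-V41-1048k'

tokenizer = AutoTokenizer.from_pretrained(model_id)
# bfloat16 matches the Torch data type listed in the specifications above.
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map='auto'
)
```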
| 46 |
+
|
| 47 |
+
## License
|
| 48 |
+
|
| 49 |
+
**WeMake/Llama-3-8B-Instruct-V41-1048k** is distributed under the ""llama3"" license. For more details, please refer to the LICENSE file in this repository.
|
| 50 |
+
|
| 51 |
+
## Acknowledgments
|
| 52 |
+
|
| 53 |
+
This model is built upon the foundational work of Meta's Llama-3 and the enhancements made by Gradient's `Llama-3-8B-Instruct-Gradient-1048k`. We extend our gratitude to the researchers and developers behind these projects for their contributions to the field of AI.
|
| 54 |
+
|
| 55 |
+
## Contact
|
| 56 |
+
|
| 57 |
+
For any inquiries, please contact us at [hey@wemake.cx](mailto:hey@wemake.cx).
|
| 58 |
+
|
| 59 |
+
Join us in exploring the possibilities of emotionally intelligent and ethically aligned AI with `Llama-3-8B-Instruct-V41-1048k`. Together, let's shape the future of human-AI interaction.
|
| 60 |
+
","{""id"": ""WeMake/Llama-3-8B-Instruct-V41-1048k"", ""author"": ""WeMake"", ""sha"": ""7c747b8ad25912ccd2db820a2779b7fe5dbf9571"", ""last_modified"": ""2025-03-01 14:22:59+00:00"", ""created_at"": ""2024-05-03 06:08:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 16, ""downloads_all_time"": null, ""likes"": 8, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""not-for-all-audiences"", ""conversational"", ""en"", ""dataset:WeMake/Intelligent-Content-Understanding"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:llama3"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- gradientai/Llama-3-8B-Instruct-Gradient-1048k\n- meta-llama/Meta-Llama-3-8B\ndatasets:\n- WeMake/Intelligent-Content-Understanding\nlanguage: en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- not-for-all-audiences"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-01 14:22:59+00:00"", 
""cardData"": ""base_model:\n- gradientai/Llama-3-8B-Instruct-Gradient-1048k\n- meta-llama/Meta-Llama-3-8B\ndatasets:\n- WeMake/Intelligent-Content-Understanding\nlanguage: en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- not-for-all-audiences"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66347f4530c0652a8afbe40f"", ""modelId"": ""WeMake/Llama-3-8B-Instruct-V41-1048k"", ""usedStorage"": 16060556376}",1,,0,,0,"https://huggingface.co/mradermacher/Llama-3-8B-Instruct-V41-1048k-GGUF, https://huggingface.co/mradermacher/Llama-3-8B-Instruct-V41-1048k-i1-GGUF",2,,0,,0
|
| 61 |
+
kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16,"---
|
| 62 |
+
language:
|
| 63 |
+
- en
|
| 64 |
+
license: llama3
|
| 65 |
+
tags:
|
| 66 |
+
- text-generation-inference
|
| 67 |
+
- transformers
|
| 68 |
+
- unsloth
|
| 69 |
+
- llama
|
| 70 |
+
- trl
|
| 71 |
+
base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k
|
| 72 |
+
datasets:
|
| 73 |
+
- zjunlp/Mol-Instructions
|
| 74 |
+
---
|
| 75 |
+
|
| 76 |
+
- **Developed by:** kevinkawchak
|
| 77 |
+
- **License:** llama3
|
| 78 |
+
- **Finetuned from model :** gradientai/Llama-3-8B-Instruct-Gradient-1048k
|
| 79 |
+
- **Finetuned using dataset :** zjunlp/Mol-Instructions, cc-by-4.0
|
| 80 |
+
- **Dataset identification:** Molecule-oriented Instructions
|
| 81 |
+
- **Dataset function:** Description guided molecule design
|
| 82 |
+
|
| 83 |
+
## May 07, 2024: Additional Fine-tunings, Built with Meta Llama 3 <br>
|
| 84 |
+
1) gradientai/Llama-3-8B-Instruct-Gradient-1048k [Model](https://huggingface.co/gradientai/Llama-3-8B-Instruct-Gradient-1048k) <br>
|
| 85 |
+
Llama 3 8B update: 1048K context length from 8K, and highest RAM consumption<br>
|
| 86 |
+
""What is the structure for adenine?"" Verbose SELFIES structure, but logical<br>
|
| 87 |
+
[Fine-tuned](https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Llama-3-8B-Instruct-Gradient-1048k-Molecule.ipynb), 610 seconds, A100 40GB <br>
|
| 88 |
+
|
| 89 |
+
2) NousResearch/Hermes-2-Pro-Llama-3-8B [Model](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B)<br>
|
| 90 |
+
Llama 3 8B update: Cleaned OpenHermes 2.5, new Function Calling, JSON Mode dataset<br>
|
| 91 |
+
""What is the structure for adenine?"" Concise SELFIES structure, but less logical <br>
|
| 92 |
+
[Fine-tuned](https://huggingface.co/kevinkawchak/NousResearch-Hermes-2-Pro-Llama-3-8B-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Hermes-2-Pro-Llama-3-8B-Molecule.ipynb), 599 seconds, A100 40GB <br>
|
| 93 |
+
|
| 94 |
+
3) nvidia/Llama3-ChatQA-1.5-8B [Model](https://huggingface.co/nvidia/Llama3-ChatQA-1.5-8B)<br>
|
| 95 |
+
Llama 3 8B update: ChatQA-1.5 to enhance tabular and arithmetic calculation capability<br>
|
| 96 |
+
""What is the structure for adenine?"" Verbose SELFIES structure and less logical <br>
|
| 97 |
+
[Fine-tuned](https://huggingface.co/kevinkawchak/nvidia-Llama3-ChatQA-1.5-8B-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Llama3-ChatQA-1.5-8B-Molecule.ipynb), 599 seconds, A100 40GB <br>
|
| 98 |
+
|
| 99 |
+
Responses were verified against the Wikipedia [Adenine](https://en.wikipedia.org/wiki/Adenine) SMILES format and a SMILES to SELFIES python notebook estimated [generator](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/SMILES%20to%20SELFIES%20estimator.ipynb). <br>
|
| 100 |
+
Fine-tunings were performed using the Apache-2.0 unsloth 'Alpaca + Llama-3 8b full example' Colab [notebook](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing).
|
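As an illustration of that kind of check, a minimal sketch with the `selfies` package round-tripping a structure between SMILES and SELFIES (the adenine SMILES string below is a commonly used form and is given only as an example; the linked notebooks remain the authoritative reference):

```python
import selfies as sf

# Round-trip an example SMILES string through SELFIES as a rough structure check.
adenine_smiles = 'Nc1ncnc2[nH]cnc12'   # example SMILES for adenine
adenine_selfies = sf.encoder(adenine_smiles)
back_to_smiles = sf.decoder(adenine_selfies)

print(adenine_selfies)
print(back_to_smiles)
```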
| 101 |
+
|
| 102 |
+
## Primary Study
|
| 103 |
+
The following are modifications or improvements to original notebooks. Please refer to the authors' models for the published primary work.
|
| 104 |
+
[Cover Image](https://drive.google.com/file/d/1J-spZMzLlPxkqfMrPxvtMZiD2_hfcGyr/view?usp=sharing). [META LLAMA 3 COMMUNITY LICENSE AGREEMENT](https://llama.meta.com/llama3/license/). Built with Meta Llama 3. <br>
|
| 105 |
+
|
| 106 |
+
A 4-bit quantization of Meta-Llama-3-8B-Instruct was used to reduce training memory requirements when fine-tuning on the zjunlp/Mol-Instructions dataset. (1-2) In addition, the minimum LoRA rank value was utilized to reduce the overall size of the created models. Specifically, the molecule-oriented instructions 'description guided molecule design' task was implemented to answer general questions and general biochemistry questions. General questions were answered with high accuracy, while biochemistry-related questions returned 'SELFIES' structures but with limited accuracy.
|
| 107 |
+
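A minimal, illustrative sketch of that setup is shown below (it is not the original Colab notebook): a 4-bit quantized base model with a small LoRA rank, fine-tuned on Mol-Instructions with Unsloth and TRL. The dataset subset/split arguments, prompt formatting, and hyperparameters are assumptions made for the example.

```python
# Illustrative sketch only: 4-bit base model + low-rank LoRA adapters trained on
# Mol-Instructions with Unsloth and TRL's SFTTrainer. Dataset arguments, prompt
# formatting, and hyperparameters are assumptions, not the original settings.
from unsloth import FastLanguageModel
from datasets import load_dataset
from trl import SFTTrainer
from transformers import TrainingArguments

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name='unsloth/llama-3-8b-Instruct-bnb-4bit',  # 4-bit quantized base (1)
    max_seq_length=2048,
    load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(model, r=8, lora_alpha=16)  # small LoRA rank

# Assumed subset/split names; see the Mol-Instructions dataset card (2) for exact configs.
dataset = load_dataset('zjunlp/Mol-Instructions', 'Molecule-oriented Instructions')

def to_text(example):
    # Illustrative Alpaca-style formatting of one instruction record.
    text = ('### Instruction:\n' + example['instruction'] + '\n\n'
            '### Input:\n' + example['input'] + '\n\n'
            '### Response:\n' + example['output'])
    return {'text': text}

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset['description_guided_molecule_design'].map(to_text),
    dataset_text_field='text',
    max_seq_length=2048,
    args=TrainingArguments(per_device_train_batch_size=2, max_steps=60,
                           learning_rate=2e-4, output_dir='outputs'),
)
trainer.train()
```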

The notebook featured Torch and Hugging Face libraries using the Unsloth llama-3-8b-Instruct-bnb-4bit quantization model. Training loss decreased steadily from 1.97 to 0.73 over 60 steps. Additional testing of the appropriate level of compression and of hyperparameter adjustments for accurate SELFIES chemical structure outputs remains relevant, as shown in the GitHub notebook for research purposes (3). A 16-bit and a reduced 4-bit size were uploaded to Hugging Face (4-5).

Update 04/24: The number of training steps was increased to further decrease loss, while maintaining reduced memory requirements through quantization and reduced size through LoRA. This allowed for significantly improved responses to biochemistry-related questions; the resulting models were saved at the following sizes: [8.03B](https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-Molecule16), [4.65B](https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-Molecule04). [github](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Meta-Llama-3-8B-Instruct-Molecule.ipynb).
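If Unsloth's merged-save helpers were used, exporting the 16-bit and 4-bit variants would look roughly like the sketch below; the helper name, `save_method` values, and directory names are assumptions based on Unsloth's documented workflow, not taken from the notebook.

```python
# Rough sketch (assumed API): export a merged 16-bit copy and a merged 4-bit copy
# of the fine-tuned model with Unsloth's save helpers. Directory names are examples.
model.save_pretrained_merged('Meta-Llama-3-8B-Instruct-Molecule16', tokenizer,
                             save_method='merged_16bit')
model.save_pretrained_merged('Meta-Llama-3-8B-Instruct-Molecule04', tokenizer,
                             save_method='merged_4bit')
```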

References:
1) unsloth: https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit
2) zjunlp: https://huggingface.co/datasets/zjunlp/Mol-Instructions
3) github: https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Meta-Llama-3-8B-Instruct-Mol.ipynb
4) hugging face: https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-LoRA-Mol16
5) hugging face: https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-LoRA-Mol04

@inproceedings{fang2023mol, <br>
  author    = {Yin Fang and<br>
               Xiaozhuan Liang and<br>
               Ningyu Zhang and<br>
               Kangwei Liu and<br>
               Rui Huang and<br>
               Zhuo Chen and<br>
               Xiaohui Fan and<br>
               Huajun Chen},<br>
  title     = {Mol-Instructions: {A} Large-Scale Biomolecular Instruction Dataset<br>
               for Large Language Models},<br>
  booktitle = {{ICLR}},<br>
  publisher = {OpenReview.net},<br>
  year      = {2024},<br>
  url       = {https://openreview.net/pdf?id=Tlsdsb6l9n}}<br>

This llama model was trained with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)","{""id"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16"", ""author"": ""kevinkawchak"", ""sha"": ""7d4041e5def52a1dae76b76dc1161dda0d972669"", ""last_modified"": ""2024-05-08 05:55:15+00:00"", ""created_at"": ""2024-05-06 05:42:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""dataset:zjunlp/Mol-Instructions"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:llama3"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\ndatasets:\n- zjunlp/Mol-Instructions\nlanguage:\n- en\nlicense: llama3\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-08 05:55:15+00:00"", ""cardData"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\ndatasets:\n- zjunlp/Mol-Instructions\nlanguage:\n- en\nlicense: llama3\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66386db3e4156d34a46d3f04"", ""modelId"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16"", ""usedStorage"": 16060556376}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16%5D(%2Fkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule,"---
language:
- en
license: apache-2.0
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k
---

# Uploaded model

- **Developed by:** kevinkawchak
- **License:** apache-2.0
- **Finetuned from model:** gradientai/Llama-3-8B-Instruct-Gradient-1048k
- **Finetuned dataset:** zjunlp/Mol-Instructions/Molecule-oriented Instructions/description_guided_molecule_design

This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
","{""id"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule"", ""author"": ""kevinkawchak"", ""sha"": ""54671b08eee04494d29f9d4855e349e760e759af"", ""last_modified"": ""2024-06-21 02:53:15+00:00"", ""created_at"": ""2024-06-17 20:12:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|reserved_special_token_250|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-21 02:53:15+00:00"", ""cardData"": ""base_model: 
gradientai/Llama-3-8B-Instruct-Gradient-1048k\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""667098bfc22463d79063ec4b"", ""modelId"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule"", ""usedStorage"": 48181669128}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule%5D(%2Fkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
MagicPrompt-Stable-Diffusion_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv
ADDED
@@ -0,0 +1,31 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
Gustavosta/MagicPrompt-Stable-Diffusion,"---
license: mit
---

# MagicPrompt - Stable Diffusion

This is a model from the MagicPrompt series of models, which are [GPT-2](https://huggingface.co/gpt2) models intended to generate prompt texts for imaging AIs, in this case: [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion).

## 🖼️ Here's an example:

<img src=""https://files.catbox.moe/ac3jq7.png"">

This model was trained with 150,000 steps on a set of about 80,000 data points filtered and extracted from the image finder for Stable Diffusion: ""[Lexica.art](https://lexica.art/)"". It was a little difficult to extract the data, since the search engine still doesn't have a public API without being protected by Cloudflare, but if you want to take a look at the original dataset, you can have a look here: [datasets/Gustavosta/Stable-Diffusion-Prompts](https://huggingface.co/datasets/Gustavosta/Stable-Diffusion-Prompts).

If you want to test the model with a demo, you can go to: ""[spaces/Gustavosta/MagicPrompt-Stable-Diffusion](https://huggingface.co/spaces/Gustavosta/MagicPrompt-Stable-Diffusion)"".
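A minimal usage sketch is shown below (not from the original card); it extends a short seed phrase into a fuller prompt with the standard transformers text-generation pipeline, and the seed phrase and generation settings are illustrative.

```python
# Minimal sketch: extend a short seed phrase into a full Stable Diffusion prompt
# with the transformers text-generation pipeline. Settings are illustrative.
from transformers import pipeline

generator = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion')
seed_text = 'a portrait of a cyberpunk samurai'
result = generator(seed_text, max_new_tokens=60, num_return_sequences=1)
print(result[0]['generated_text'])
```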
## 💻 You can see other MagicPrompt models:

- For Dall-E 2: [Gustavosta/MagicPrompt-Dalle](https://huggingface.co/Gustavosta/MagicPrompt-Dalle)
- For Midjourney: [Gustavosta/MagicPrompt-Midjourney](https://huggingface.co/Gustavosta/MagicPrompt-Midjourney) **[⚠️ In progress]**
- MagicPrompt full: [Gustavosta/MagicPrompt](https://huggingface.co/Gustavosta/MagicPrompt) **[⚠️ In progress]**

## ⚖️ Licence:

[MIT](https://huggingface.co/models?license=license:mit)

When using this model, please credit: [Gustavosta](https://huggingface.co/Gustavosta)

**Thanks for reading this far! :)**
","{""id"": ""Gustavosta/MagicPrompt-Stable-Diffusion"", ""author"": ""Gustavosta"", ""sha"": ""c2dfdbff1007791b5952aff9c02e622a0461f914"", ""last_modified"": ""2023-07-09 22:10:48+00:00"", ""created_at"": ""2022-09-17 22:34:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 16809, ""downloads_all_time"": null, ""likes"": 727, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""coreml"", ""safetensors"", ""gpt2"", ""text-generation"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: mit"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPT2LMHeadModel""], ""model_type"": ""gpt2"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float16_model.mlpackage/Data/com.apple.CoreML/model.mlmodel', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float16_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float16_model.mlpackage/Manifest.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='coreml/text-generation/float32_model.mlpackage/Manifest.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Gustavosta/MagicPrompt-Stable-Diffusion"", ""huggingface-projects/magic-diffusion"", ""doevent/Stable-Diffusion-prompt-generator"", ""yizhangliu/Text-to-Image"", ""RamAnanth1/visual-chatGPT"", ""Yntec/ToyWorldXL"", ""phenomenon1981/MagicPrompt-Stable-Diffusion"", ""awacke1/Prompt-Refinery-Text-to-Image-Generation"", ""KBaba7/Quant"", 
""BoomerangGirl/MagicPrompt-Stable-Diffusion"", ""Nickhilearla135095/maximum_diffusion"", ""seawolf2357/sd-prompt-gen"", ""Kaludi/Stable-Diffusion-Prompt-Generator_App"", ""duchaba/sd_prompt_helper"", ""shogi880/ChatGPT-StableDiffusion-CharacterDesign"", ""rabiyulfahim/Prompt-Refinery-Text-to-Image-Generation"", ""deepparag/DreamlikeArt-Diffusion-1.0"", ""j43fer/MagicPrompt-Stable-Diffusion"", ""om-app/magic-diffusion"", ""om-app/Promt-to-Image-diffusions"", ""Daniton/MagicPrompt-Stable-Diffusion"", ""ehristoforu/Rensor"", ""bhaskartripathi/LLM_Quantization"", ""alisrbdni/magic-to-diffusion"", ""Silence1412/Stable_Diffusion_Cpu"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""pngwn/Stable-Diffusion-prompt-generator"", ""aichina/MagicPrompt-Stable-Diffusion"", ""markmagic/magic-diffusion"", ""Dao3/Top-20-Models"", ""Mrchuw/MagicPrompt-Stable-Diffusion"", ""Tasslehawk/Stable-Diffusion-prompt-generator"", ""5m4ck3r/Prompt-Gen"", ""ZeroTwo3/MagicPrompt-Stable-Diffusion"", ""bala0o8o0/Prompt-Enhancer"", ""ClaudioX/mg_sd_esp"", ""eeyorestoned/maximum_diffusion"", ""trysem/visua"", ""yuan2023/Stable-Diffusion-Prompt-Generator_App"", ""gato001k1/maximum_diffusion0k"", ""TeamMlx/MagicPrompt-Stable-Diffusion"", ""KKMobile/MagicPrompt-Stable-Diffusion"", ""ysharma/visual_chatgpt_dummy"", ""Dao3/MagicPrompt-Stable-Diffusion"", ""jefftko/Stable-Diffusion-prompt-generator"", ""3mrology/Chameleon_Text2Img_Generation_Demo"", ""Ifeanyi/promptGenerator"", ""dreamdrop-art/000555111"", ""phenixrhyder/MagicPrompt"", ""Achyuth4/MagicPrompt-Stable-Diffusion"", ""awqwqwq/Stable-Diffusion-prompt-generator"", ""bradarrML/magic-diffusion"", ""Joeythemonster/magic-diffusion"", ""cloudwp/Top-20-Diffusion"", ""Ali36Ahmad/MagicPrompt-Stable-Diffusion"", ""Ali36Ahmad/magic-diffusion"", ""pngwn/huguru"", ""nightfury/Magic_Text_to_prompt_to_art_Diffusion"", ""alisrbdni/MagicPrompt-Stable-Diffusion"", ""Nexxt/MagicPrompt-Stable-Diffusion"", ""Armored-Atom/DiFuse_Your_Thoughts"", ""johnsu6616/SD_Helper_01"", ""skyxinsun/Gustavosta-MagicPrompt-Stable-Diffusion"", ""willianmcs/visual-chatgpt"", ""Libra7578/Promt-to-Image-diffusions"", ""Stereo0001/MagicPrompt-Stable-Diffusion"", ""donalda/Gustavosta-MagicPrompt-Stable-Diffusion"", ""ai-art/magic-diffusion-generator"", ""kbora/minerva-generate-docker"", ""Alfasign/Einfach.Stable_DiffPomrpter"", ""Harshveer/Diffusion30x"", ""awacke1/MagicPrompt-Stable-Diffusion"", ""svjack/MagicPrompt-Stable-Diffusion"", ""Omnibus/2-button-Story-Board"", ""poetrychor/Gustavosta-MagicPrompt-Stable-Diffusion"", ""Ashrafb/MagicPrompt-Stable-Diffusion"", ""Vedits/Magic-Prompt-generator"", ""vih-v/Stable-Diffusion-prompt-generator"", ""Abhaykoul/Prompt_generator_for_helpingAI-tti"", ""Omnibus/top-20-diffusion"", ""ruslanmv/convert_to_gguf"", ""Rooc/Prompt-Generator"", ""Nymbo/MagicPrompt-Stable-Diffusion"", ""Ddfndjs/Cxxdx"", ""TeamHaltmannSusanaHWCEO/StreamlitRipperv0Diffusion"", ""ADA3e21/MagicPrompt-Stable-Diffusion"", ""tommy24/magic-diffusion"", ""ivaneliseeff/prompt2"", ""gvargas99/inspirationai1"", ""ZKYT/Gustavosta-MagicPrompt-Stable-Diffusion"", ""pepereeee/aiartnik"", ""next-social/audio_img"", ""om-app/Art-diffusion"", ""ismot/9t8"", ""Warkaz/diffusion"", ""TPKING/Gustavosta-MagicPrompt-Stable-Diffusion"", ""Coqtail/Gustavosta-MagicPrompt-Stable-Diffusion"", ""SAPTADIP/stable-diffusion-prompt-generator"", ""procrastinya/test_space""], ""safetensors"": {""parameters"": {""F32"": 124439808, ""U8"": 12582912}, ""total"": 137022720}, ""security_repo_status"": null, ""xet_enabled"": 
null, ""lastModified"": ""2023-07-09 22:10:48+00:00"", ""cardData"": ""license: mit"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""63264b5fff539edeea9491f6"", ""modelId"": ""Gustavosta/MagicPrompt-Stable-Diffusion"", ""usedStorage"": 4632052188}",0,,0,https://huggingface.co/jeong-jasonji/CSE574_prompter,1,"https://huggingface.co/mradermacher/MagicPrompt-Stable-Diffusion-GGUF, https://huggingface.co/mradermacher/MagicPrompt-Stable-Diffusion-i1-GGUF, https://huggingface.co/tensorblock/MagicPrompt-Stable-Diffusion-GGUF, https://huggingface.co/PrunaAI/Gustavosta-MagicPrompt-Stable-Diffusion-GGUF-smashed",4,,0,"BoomerangGirl/MagicPrompt-Stable-Diffusion, Gustavosta/MagicPrompt-Stable-Diffusion, KBaba7/Quant, Kaludi/Stable-Diffusion-Prompt-Generator_App, Yntec/ToyWorldXL, awacke1/Prompt-Refinery-Text-to-Image-Generation, doevent/Stable-Diffusion-prompt-generator, duchaba/sd_prompt_helper, ehristoforu/Rensor, huggingface-projects/magic-diffusion, huggingface/InferenceSupport/discussions/new?title=Gustavosta/MagicPrompt-Stable-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGustavosta%2FMagicPrompt-Stable-Diffusion%5D(%2FGustavosta%2FMagicPrompt-Stable-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, seawolf2357/sd-prompt-gen, yizhangliu/Text-to-Image",13
Molmo-7B-O-0924_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
@@ -0,0 +1,218 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
allenai/Molmo-7B-O-0924,"---
license: apache-2.0
language:
- en
base_model:
- openai/clip-vit-large-patch14-336
- allenai/OLMo-7B-1124
pipeline_tag: image-text-to-text
tags:
- multimodal
- olmo
- molmo
- pixmo
library_name: transformers
---

<img src=""molmo_logo.png"" alt=""Logo for the Molmo Project"" style=""width: auto; height: 50px;"">

# Molmo 7B-O

Molmo is a family of open vision-language models developed by the Allen Institute for AI.
Molmo models are trained on PixMo, a dataset of 1 million highly curated image-text pairs.
It has state-of-the-art performance among multimodal models of a similar size while being fully open-source.
You can find all models in the Molmo family [here](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19).
**Learn more** about the Molmo family [in our announcement blog post](https://molmo.allenai.org/blog) or the [paper](https://huggingface.co/papers/2409.17146).

Molmo 7B-O is based on [OLMo-7B-1024](https://huggingface.co/allenai/OLMo-7B-1024-preview) (a **preview** of the next generation of OLMo models)
and uses [OpenAI CLIP](https://huggingface.co/openai/clip-vit-large-patch14-336) as its vision backbone.
It performs comfortably between GPT-4V and GPT-4o on both academic benchmarks and human evaluation.

This checkpoint is a **preview** of the Molmo release. All artifacts used in creating Molmo (PixMo dataset, training code, evaluations, intermediate checkpoints) will be made available at a later date, furthering our commitment to open-source AI development and reproducibility.

[**Sign up here**](https://docs.google.com/forms/d/e/1FAIpQLSdML1MhNNBDsCHpgWG65Oydg2SjZzVasyqlP08nBrWjZp_c7A/viewform) to be the first to know when artifacts are released.

Quick links:
- 💬 [Demo](https://molmo.allenai.org/)
- 📂 [All Models](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19)
- 📃 [Paper](https://molmo.allenai.org/paper.pdf)
- 🎥 [Blog with Videos](https://molmo.allenai.org/blog)

## Quick Start

To run Molmo, first install dependencies:

```bash
pip install einops torchvision
```

Then, follow these steps:

```python
import requests
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig

# load the processor
processor = AutoProcessor.from_pretrained(
    'allenai/Molmo-7B-O-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)

# load the model
model = AutoModelForCausalLM.from_pretrained(
    'allenai/Molmo-7B-O-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)

# process the image and text
inputs = processor.process(
    images=[Image.open(requests.get(""https://picsum.photos/id/237/536/354"", stream=True).raw)],
    text=""Describe this image.""
)

# move inputs to the correct device and make a batch of size 1
inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}

# generate output; maximum 200 new tokens; stop generation when <|endoftext|> is generated
output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
    tokenizer=processor.tokenizer
)

# only get generated tokens; decode them to text
generated_tokens = output[0, inputs['input_ids'].size(1):]
generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)

# print the generated text
print(generated_text)

# >>> This photograph captures an adorable black Labrador puppy sitting on a weathered
# wooden deck. The deck's planks, which are a mix of light and dark brown with ...
```

To make inference more efficient, run with autocast:

```python
with torch.autocast(device_type=""cuda"", enabled=True, dtype=torch.bfloat16):
    output = model.generate_from_batch(
        inputs,
        GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
        tokenizer=processor.tokenizer
    )
```

We did most of our evaluations in this setting (autocast on, but float32 weights).

To even further reduce the memory requirements, the model can be run with bfloat16 weights:

```python
model.to(dtype=torch.bfloat16)
inputs[""images""] = inputs[""images""].to(torch.bfloat16)
output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
    tokenizer=processor.tokenizer
)
```

Note that this can sometimes change the output of the model compared to running with float32 weights.

## Evaluations

| Model | Average Score on 11 Academic Benchmarks | Human Preference Elo Rating |
|-----------------------------|-----------------------------------------|-----------------------------|
| Molmo 72B | 81.2 | 1077 |
| Molmo 7B-D | 77.3 | 1056 |
| **Molmo 7B-O (this model)** | **74.6** | **1051** |
| MolmoE 1B | 68.6 | 1032 |
| GPT-4o | 78.5 | 1079 |
| GPT-4V | 71.1 | 1041 |
| Gemini 1.5 Pro | 78.3 | 1074 |
| Gemini 1.5 Flash | 75.1 | 1054 |
| Claude 3.5 Sonnet | 76.7 | 1069 |
| Claude 3 Opus | 66.4 | 971 |
| Claude 3 Haiku | 65.3 | 999 |
| Qwen VL2 72B | 79.4 | 1037 |
| Qwen VL2 7B | 73.7 | 1025 |
| Intern VL2 LLAMA 76B | 77.1 | 1018 |
| Intern VL2 8B | 69.4 | 953 |
| Pixtral 12B | 69.5 | 1016 |
| Phi3.5-Vision 4B | 59.7 | 982 |
| PaliGemma 3B | 50.0 | 937 |
| LLAVA OneVision 72B | 76.6 | 1051 |
| LLAVA OneVision 7B | 72.0 | 1024 |
| Cambrian-1 34B | 66.8 | 953 |
| Cambrian-1 8B | 63.4 | 952 |
| xGen - MM - Interleave 4B | 59.5 | 979 |
| LLAVA-1.5 13B | 43.9 | 960 |
| LLAVA-1.5 7B | 40.7 | 951 |

*Benchmarks: AI2D test, ChartQA test, VQA v2.0 test, DocQA test, InfographicVQA test, TextVQA val, RealWorldQA, MMMU val, MathVista testmini, CountBenchQA, Flickr Count (we collected this new dataset that is significantly harder than CountBenchQA).*

## FAQs

### I'm getting a broadcast error when processing images!

Your image might not be in RGB format. You can convert it using the following code snippet:

```python
from PIL import Image

image = Image.open(...)

if image.mode != ""RGB"":
    image = image.convert(""RGB"")
```

### Molmo doesn't work great with transparent images!

We received reports that Molmo models might struggle with transparent images.
For the time being, we recommend adding a white or dark background to your images before passing them to the model. The code snippet below shows how to do this using the Python Imaging Library (PIL):

```python
import requests
from PIL import Image, ImageStat
from transformers import AutoProcessor

# Load the image
url = ""...""
image = Image.open(requests.get(url, stream=True).raw)

# Convert the image to grayscale to calculate brightness
gray_image = image.convert('L')  # Convert to grayscale

# Calculate the average brightness
stat = ImageStat.Stat(gray_image)
average_brightness = stat.mean[0]  # Get the average value

# Define background color based on brightness (threshold can be adjusted)
bg_color = (0, 0, 0) if average_brightness > 127 else (255, 255, 255)

# Create a new image with the same size as the original, filled with the background color
new_image = Image.new('RGB', image.size, bg_color)

# Paste the original image on top of the background (use image as a mask if needed)
new_image.paste(image, (0, 0), image if image.mode == 'RGBA' else None)

# Now you can pass the new_image to Molmo
processor = AutoProcessor.from_pretrained(
    'allenai/Molmo-7B-D-0924',
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)
```

## License and Use

This model is licensed under Apache 2.0. It is intended for research and educational use.
For more information, please see our [Responsible Use Guidelines](https://allenai.org/responsible-use).
","{""id"": ""allenai/Molmo-7B-O-0924"", ""author"": ""allenai"", ""sha"": ""0e727957abd46f3ef741ddbda3452db1df873a6e"", ""last_modified"": ""2024-11-15 06:53:47+00:00"", ""created_at"": ""2024-09-25 05:53:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6776, ""downloads_all_time"": null, ""likes"": 157, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""molmo"", ""text-generation"", ""multimodal"", ""olmo"", ""pixmo"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""en"", ""arxiv:2409.17146"", ""base_model:openai/clip-vit-large-patch14-336"", ""base_model:finetune:openai/clip-vit-large-patch14-336"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- allenai/OLMo-7B-1124\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MolmoForCausalLM""], ""auto_map"": {""AutoConfig"": ""config_molmo.MolmoConfig"", ""AutoModelForCausalLM"": ""modeling_molmo.MolmoForCausalLM""}, ""model_type"": ""molmo"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{% for message in messages -%}\n {%- if (loop.index % 2 == 1 and message['role'] != 'user') or \n (loop.index % 2 == 0 and message['role'].lower() != 'assistant') -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {{ message['role'].capitalize() + ': ' + message['content'] }}\n {%- if not loop.last -%}\n {{ ' ' }}\n {%- endif %}\n {%- endfor -%}\n {%- if add_generation_prompt -%}\n {{ ' Assistant:' }}\n {%- endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|pad|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='molmo_logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""Fizzarolli/Molmo-7B-O-0924"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""F32"": 7665032192}, ""total"": 7665032192}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-15 06:53:47+00:00"", ""cardData"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- allenai/OLMo-7B-1124\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66f3a54e5294d1cc7c2e5fea"", ""modelId"": ""allenai/Molmo-7B-O-0924"", ""usedStorage"": 30660216256}",0,,0,,0,"https://huggingface.co/cyan2k/molmo-7B-O-bnb-4bit, https://huggingface.co/impactframes/molmo-7B-O-bnb-4bit",2,,0,"FallnAI/Quantize-HF-Models, Fizzarolli/Molmo-7B-O-0924, K00B404/LLM_Quantization, KBaba7/Quant, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=allenai/Molmo-7B-O-0924&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Ballenai%2FMolmo-7B-O-0924%5D(%2Fallenai%2FMolmo-7B-O-0924)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ruslanmv/convert_to_gguf, totolook/Quant",8
MythoMax-L2-13b_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
@@ -0,0 +1,284 @@
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
Gryphe/MythoMax-L2-13b,"---
license: other
language:
- en
---
With Llama 3 released, it's time for MythoMax to slowly fade away... [Let's do it in style!](https://suno.com/song/3d69cd72-e893-4193-866f-385f47778ce0)

An improved, potentially even perfected variant of MythoMix, my [MythoLogic-L2](https://huggingface.co/Gryphe/MythoLogic-L2-13b) and [Huginn](https://huggingface.co/The-Face-Of-Goonery/Huginn-13b-FP16) merge using a highly experimental tensor-type merge technique. The main difference from MythoMix is that I allowed more of Huginn to intermingle with the single tensors located at the front and end of a model, resulting in increased coherency across the entire structure.

The script and the accompanying templates I used to produce both can [be found here](https://github.com/Gryphe/BlockMerge_Gradient/tree/main/YAML).

This model is proficient at both roleplaying and storywriting due to its unique nature.

Quantized models are available from TheBloke: [GGUF](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF) - [GPTQ](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ) - [AWQ](https://huggingface.co/TheBloke/MythoMax-L2-13B-AWQ) (You're the best!)

## Model details

The idea behind this merge is that each layer is composed of several tensors, which are in turn responsible for specific functions. Using MythoLogic-L2's robust understanding as its input and Huginn's extensive writing capability as its output seems to have resulted in a model that excels at both, confirming my theory. (More details to be released at a later time)

This type of merge is incapable of being illustrated, as each of its 363 tensors had a unique ratio applied to it. As with my prior merges, gradients were part of these ratios to further finetune its behaviour.
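As a rough, hypothetical illustration of the per-tensor gradient-ratio idea described above (this is not the author's BlockMerge_Gradient script, and the linear ratio schedule is invented purely for the example):

```python
# Hypothetical sketch of a per-tensor weighted merge of two checkpoints.
# Each tensor gets its own blend ratio, swept linearly across the tensor list
# here; the real MythoMax ratios were hand-tuned per tensor, not a simple sweep.
# State-dict values are assumed to be tensors supporting * and +.

def gradient_merge(state_dict_a, state_dict_b, start=0.9, end=0.1):
    merged = {}
    names = list(state_dict_a.keys())
    for i, name in enumerate(names):
        ratio = start + (end - start) * i / max(len(names) - 1, 1)
        merged[name] = ratio * state_dict_a[name] + (1.0 - ratio) * state_dict_b[name]
    return merged

# usage sketch: merged_sd = gradient_merge(model_a.state_dict(), model_b.state_dict())
```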

## Prompt Format

This model primarily uses Alpaca formatting, so for optimal model performance, use:
```
<System prompt/Character Card>

### Instruction:
Your instruction or question here.
For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.

### Response:
```

---
license: other
---","{""id"": ""Gryphe/MythoMax-L2-13b"", ""author"": ""Gryphe"", ""sha"": ""58e77dd48a65176f97f6f376c93efe9caad9c130"", ""last_modified"": ""2024-04-21 17:42:57+00:00"", ""created_at"": ""2023-08-10 20:35:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6263, ""downloads_all_time"": null, ""likes"": 305, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""en"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: other"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00012-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00013.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""akhaliq/anycoder"", ""featherless-ai/try-this-model"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""AiActivity/AI-Assistant"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""NiansuhAI/Main"", ""kz-transformers/kaz-llm-lb"", ""PeepDaSlan9/Gryphe-MythoMax-L2-13b"", ""felixz/open_llm_leaderboard"", ""Darok/Featherless-Feud"", ""ChrisNguyenAI/Chat-multi-models"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""bardsai/performance-llm-board"", ""emekaboris/try-this-model"", ""BAAI/open_flageval_vlm_leaderboard"", ""neubla/neubla-llm-evaluation-board"", ""lambdabrendan/Lambda-LLM-Calculator"", ""artificialguybr/OpenRouter-LLM-Chat"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""imjunaidafzal/can-it-run-llm"", ""SC999/NV_Nemotron"", ""sanbo1200/Main1"", ""Arifzyn/Gryphe-MythoMax-L2-13b"", ""marthasimmons/Gryphe-MythoMax-L2-13b"", ""n0rwegiancoder/Gryphe-MythoMax-L2-13b"", ""nonhuman/nnnn"", ""smothiki/open_llm_leaderboard"", ""AneelSen/Gryphe-MythoMax-L2-13b"", ""okeanos/can-it-run-llm"", ""teriy/Gryphe-MythoMax-L2-13b"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""Nymbo/can-it-run-llm"", ""muellerzr/can-it-run-llm"", ""SomeDude1/Gryphe-MythoMax-L2-13b"", ""kbmlcoding/open_llm_leaderboard_free"", ""Kaballas/Pilot"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""Asiya057/Incarna-Mind"", ""Asiya057/Incarna-Mind-POC"", ""Xhaheen/AI_safety_testing"", ""Xhaheen/phoeniks_redteamers"", ""mjalg/IFEvalTR"", ""srinuksv/Main"", ""dawood/anychat"", ""vuxuanhoan/anychat"", ""JackHoltone/try-this-model"", ""baffo32/OpenRouter-LLM-Chat-Fork"", ""Mackintoshj/anychat"", ""mariamgvelesiani/anychat"", ""yalotaibii/anychat"", ""ilovemystagename/anychat"", ""sanbo1200/Main"", ""sanbo110/Main"", ""k11112/try-this-model"", ""Mister12rayyan/RYanychat"", ""Starchik1/anychat"", ""sanbo110/Main1"", ""Starchik/CodeBox"", ""BaRiDo/TheComedyCache"", ""PyScoutAI/PyscoutAI"", ""fmlemos/zeroshot-chatbot-openrouter"", ""ajotta/IA_Escritora"", ""h4sch/any_coder""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-21 17:42:57+00:00"", ""cardData"": ""language:\n- en\nlicense: other"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64d54a162fe2c11264f18e92"", ""modelId"": ""Gryphe/MythoMax-L2-13b"", ""usedStorage"": 52064120716}",0,"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML, https://huggingface.co/theNovaAI/Hypernova-experimental",2,"https://huggingface.co/youndukn/mythomax_lora_adapter, https://huggingface.co/youndukn/mythomax-7b-sft-qlora, https://huggingface.co/youndukn/mythomax-13b-sft-lora, https://huggingface.co/youndukn/zephyr-7b-sft-qlora-8bit-adapter, https://huggingface.co/Guilherme34/Samantha-Mythomax-l2-13b",5,"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF, https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ, https://huggingface.co/mradermacher/MythoMax-L2-13b-i1-GGUF, 
https://huggingface.co/Clevyby/Mythomax-L2-13b-Q4_K_M-GGUF, https://huggingface.co/TheBloke/MythoMax-L2-13B-AWQ, https://huggingface.co/4bit/MythoMax-L2-13B-GPTQ, https://huggingface.co/GusPuffy/sq-MythoMax-L2-13b-w4-s0, https://huggingface.co/Andrewwwwww/MythoMax-L2-13B-GGUF, https://huggingface.co/mradermacher/MythoMax-L2-13b-GGUF, https://huggingface.co/theNovaAI/Hypernova-experimental-GPTQ, https://huggingface.co/theNovaAI/Hypernova-experimental-GGUF, https://huggingface.co/PrunaAI/Gryphe-MythoMax-L2-13b-bnb-4bit-smashed, https://huggingface.co/DevQuasar/Gryphe.MythoMax-L2-13b-GGUF",13,"https://huggingface.co/neils1984/SnowyMaxRP-l2-13b, https://huggingface.co/gotchu/season-8-13bmerge, https://huggingface.co/mergekit-community/mergekit-passthrough-vptgfhk, https://huggingface.co/backyardai/Psyonic-Cetacean-MythoMax-Ultra-Quality-29B, https://huggingface.co/ClaudioItaly/Maxtopia-13B, https://huggingface.co/QuantFactory/Maxtopia-13B-GGUF",6,"AiActivity/AI-Assistant, BAAI/open_cn_llm_leaderboard, ChrisNguyenAI/Chat-multi-models, Darok/Featherless-Feud, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, NiansuhAI/Main, OPTML-Group/UnlearnCanvas-Benchmark, PeepDaSlan9/Gryphe-MythoMax-L2-13b, bardsai/performance-llm-board, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=Gryphe/MythoMax-L2-13b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BGryphe%2FMythoMax-L2-13b%5D(%2FGryphe%2FMythoMax-L2-13b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",13
|
| 39 |
+
TheBloke/MythoMax-L2-13B-GGML,"---
|
| 40 |
+
language:
|
| 41 |
+
- en
|
| 42 |
+
license: llama2
|
| 43 |
+
model_name: MythoMax L2 13B
|
| 44 |
+
inference: false
|
| 45 |
+
model_creator: Gryphe
|
| 46 |
+
model_link: https://huggingface.co/Gryphe/MythoMax-L2-13b
|
| 47 |
+
model_type: llama
|
| 48 |
+
quantized_by: TheBloke
|
| 49 |
+
base_model: Gryphe/MythoMax-L2-13b
|
| 50 |
+
---
|
| 51 |
+
|
| 52 |
+
<!-- header start -->
|
| 53 |
+
<!-- 200823 -->
|
| 54 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 55 |
+
<img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 56 |
+
</div>
|
| 57 |
+
<div style=""display: flex; justify-content: space-between; width: 100%;"">
|
| 58 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-start;"">
|
| 59 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
|
| 60 |
+
</div>
|
| 61 |
+
<div style=""display: flex; flex-direction: column; align-items: flex-end;"">
|
| 62 |
+
<p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
|
| 63 |
+
</div>
|
| 64 |
+
</div>
|
| 65 |
+
<div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
|
| 66 |
+
<hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
|
| 67 |
+
<!-- header end -->
|
| 68 |
+
|
| 69 |
+
# MythoMax L2 13B - GGML
|
| 70 |
+
- Model creator: [Gryphe](https://huggingface.co/Gryphe)
|
| 71 |
+
- Original model: [MythoMax L2 13B](https://huggingface.co/Gryphe/MythoMax-L2-13b)
|
| 72 |
+
|
| 73 |
+
## Description
|
| 74 |
+
|
| 75 |
+
This repo contains GGML format model files for [Gryphe's MythoMax L2 13B](https://huggingface.co/Gryphe/MythoMax-L2-13b).
|
| 76 |
+
|
| 77 |
+
### Important note regarding GGML files.
|
| 78 |
+
|
| 79 |
+
The GGML format has now been superseded by GGUF. As of August 21st 2023, [llama.cpp](https://github.com/ggerganov/llama.cpp) no longer supports GGML models. Third party clients and libraries are expected to still support it for a time, but many may also drop support.
|
| 80 |
+
|
| 81 |
+
Please use the GGUF models instead.
|
| 82 |
+
### About GGML
|
| 83 |
+
|
| 84 |
+
GGML files are for CPU + GPU inference using [llama.cpp](https://github.com/ggerganov/llama.cpp) and libraries and UIs which support this format, such as:
|
| 85 |
+
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most popular web UI. Supports NVidia CUDA GPU acceleration.
|
| 86 |
+
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a powerful GGML web UI with GPU acceleration on all platforms (CUDA and OpenCL). Especially good for storytelling.
|
| 87 |
+
* [LM Studio](https://lmstudio.ai/), a fully featured local GUI with GPU acceleration on both Windows (NVidia and AMD), and macOS.
|
| 88 |
+
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with CUDA GPU acceleration via the c_transformers backend.
|
| 89 |
+
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.
|
| 90 |
+
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
|
| 91 |
+
|
| 92 |
+
## Repositories available
|
| 93 |
+
|
| 94 |
+
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ)
|
| 95 |
+
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF)
|
| 96 |
+
* [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference (deprecated)](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML)
|
| 97 |
+
* [Gryphe's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/Gryphe/MythoMax-L2-13b)
|
| 98 |
+
|
| 99 |
+
## Prompt template: Custom
|
| 100 |
+
|
| 101 |
+
```
|
| 102 |
+
{system_message}
|
| 103 |
+
|
| 104 |
+
### Instruction:
|
| 105 |
+
{prompt}
|
| 106 |
+
(For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.)
|
| 107 |
+
|
| 108 |
+
### Response:
|
| 109 |
+
|
| 110 |
+
```
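(A minimal Python sketch, not part of the original card, showing one way to assemble this template into a single prompt string; the optional roleplay line is the suggestion quoted in the template above.)

```python
# Sketch: assemble the custom Alpaca-style template shown above.
def build_prompt(system_message: str, instruction: str, roleplay_hint: str = '') -> str:
    parts = [system_message, '', '### Instruction:', instruction]
    if roleplay_hint:
        parts.append(roleplay_hint)  # optional line for chat/roleplay use
    parts += ['', '### Response:', '']
    return '\n'.join(parts)

print(build_prompt('You are a story writing assistant.', 'Write a story about llamas'))
```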
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
<!-- compatibility_ggml start -->
|
| 114 |
+
## Compatibility
|
| 115 |
+
|
| 116 |
+
These quantised GGML files are compatible with llama.cpp between June 6th (commit `2d43387`) and August 21st 2023.
|
| 117 |
+
|
| 118 |
+
For support with the latest llama.cpp, please use GGUF files instead.
|
| 119 |
+
|
| 120 |
+
The final llama.cpp commit with support for GGML was: [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
|
| 121 |
+
|
| 122 |
+
As of August 23rd 2023 they are still compatible with all UIs, libraries and utilities which use GGML. This may change in the future.
|
| 123 |
+
|
| 124 |
+
## Explanation of the new k-quant methods
|
| 125 |
+
<details>
|
| 126 |
+
<summary>Click to see details</summary>
|
| 127 |
+
|
| 128 |
+
The new methods available are:
|
| 129 |
+
* GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
|
| 130 |
+
* GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
|
| 131 |
+
* GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
|
| 132 |
+
* GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
|
| 133 |
+
* GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw
|
| 134 |
+
* GGML_TYPE_Q8_K - ""type-0"" 8-bit quantization. Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type.
|
| 135 |
+
|
| 136 |
+
Refer to the Provided Files table below to see what files use which methods, and how.
|
| 137 |
+
</details>
|
| 138 |
+
<!-- compatibility_ggml end -->
|
| 139 |
+
|
| 140 |
+
## Provided files
|
| 141 |
+
|
| 142 |
+
| Name | Quant method | Bits | Size | Max RAM required | Use case |
|
| 143 |
+
| ---- | ---- | ---- | ---- | ---- | ----- |
|
| 144 |
+
| [mythomax-l2-13b.ggmlv3.q2_K.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q2_K.bin) | q2_K | 2 | 5.51 GB| 8.01 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors. |
|
| 145 |
+
| [mythomax-l2-13b.ggmlv3.q3_K_S.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q3_K_S.bin) | q3_K_S | 3 | 5.66 GB| 8.16 GB | New k-quant method. Uses GGML_TYPE_Q3_K for all tensors |
|
| 146 |
+
| [mythomax-l2-13b.ggmlv3.q3_K_M.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q3_K_M.bin) | q3_K_M | 3 | 6.31 GB| 8.81 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 147 |
+
| [mythomax-l2-13b.ggmlv3.q3_K_L.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q3_K_L.bin) | q3_K_L | 3 | 6.93 GB| 9.43 GB | New k-quant method. Uses GGML_TYPE_Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
|
| 148 |
+
| [mythomax-l2-13b.ggmlv3.q4_0.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q4_0.bin) | q4_0 | 4 | 7.37 GB| 9.87 GB | Original quant method, 4-bit. |
|
| 149 |
+
| [mythomax-l2-13b.ggmlv3.q4_K_S.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q4_K_S.bin) | q4_K_S | 4 | 7.37 GB| 9.87 GB | New k-quant method. Uses GGML_TYPE_Q4_K for all tensors |
|
| 150 |
+
| [mythomax-l2-13b.ggmlv3.q4_K_M.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q4_K_M.bin) | q4_K_M | 4 | 7.87 GB| 10.37 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q4_K |
|
| 151 |
+
| [mythomax-l2-13b.ggmlv3.q4_1.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q4_1.bin) | q4_1 | 4 | 8.17 GB| 10.67 GB | Original quant method, 4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models. |
|
| 152 |
+
| [mythomax-l2-13b.ggmlv3.q5_0.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q5_0.bin) | q5_0 | 5 | 8.97 GB| 11.47 GB | Original quant method, 5-bit. Higher accuracy, higher resource usage and slower inference. |
|
| 153 |
+
| [mythomax-l2-13b.ggmlv3.q5_K_S.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q5_K_S.bin) | q5_K_S | 5 | 8.97 GB| 11.47 GB | New k-quant method. Uses GGML_TYPE_Q5_K for all tensors |
|
| 154 |
+
| [mythomax-l2-13b.ggmlv3.q5_K_M.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q5_K_M.bin) | q5_K_M | 5 | 9.23 GB| 11.73 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q5_K |
|
| 155 |
+
| [mythomax-l2-13b.ggmlv3.q5_1.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q5_1.bin) | q5_1 | 5 | 9.78 GB| 12.28 GB | Original quant method, 5-bit. Even higher accuracy, resource usage and slower inference. |
|
| 156 |
+
| [mythomax-l2-13b.ggmlv3.q6_K.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q6_K.bin) | q6_K | 6 | 10.68 GB| 13.18 GB | New k-quant method. Uses GGML_TYPE_Q8_K for all tensors - 6-bit quantization |
|
| 157 |
+
| [mythomax-l2-13b.ggmlv3.q8_0.bin](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML/blob/main/mythomax-l2-13b.ggmlv3.q8_0.bin) | q8_0 | 8 | 13.79 GB| 16.29 GB | Original quant method, 8-bit. Almost indistinguishable from float16. High resource use and slow. Not recommended for most users. |
|
| 158 |
+
|
| 159 |
+
**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
|
| 160 |
+
|
| 161 |
+
## How to run in `llama.cpp`
|
| 162 |
+
|
| 163 |
+
Make sure you are using `llama.cpp` from commit [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa) or earlier.
|
| 164 |
+
|
| 165 |
+
For compatibility with the latest llama.cpp, please use GGUF files instead.
|
| 166 |
+
|
| 167 |
+
```
|
| 168 |
+
./main -t 10 -ngl 32 -m mythomax-l2-13b.ggmlv3.q4_K_M.bin --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""```\nYou are a story writing assistant.\n\n### Instruction:\nWrite a story about llamas\n(For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.)\n\n### Response:\n\n```""
|
| 169 |
+
```
|
| 170 |
+
Change `-t 10` to the number of physical CPU cores you have. For example if your system has 8 cores/16 threads, use `-t 8`.
|
| 171 |
+
|
| 172 |
+
Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.
|
| 173 |
+
|
| 174 |
+
Change `-c 2048` to the desired sequence length for this model. For example, `-c 4096` for a Llama 2 model. For models that use RoPE, add `--rope-freq-base 10000 --rope-freq-scale 0.5` for doubled context, or `--rope-freq-base 10000 --rope-freq-scale 0.25` for 4x context.
|
| 175 |
+
|
| 176 |
+
If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
|
| 177 |
+
|
| 178 |
+
For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
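If you prefer to drive the same GGML file from Python, the CLI flags above map onto [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) constructor and call arguments. The sketch below is not from the original card and assumes an older llama-cpp-python release that can still load GGML v3 files (e.g. 0.1.78); newer releases accept GGUF only:

```python
# Rough Python equivalent of the CLI call above, using llama-cpp-python
# (assumes a pre-GGUF version of the library, e.g. 0.1.78).
from llama_cpp import Llama

llm = Llama(
    model_path='mythomax-l2-13b.ggmlv3.q4_K_M.bin',
    n_ctx=2048,        # -c 2048
    n_threads=10,      # -t 10
    n_gpu_layers=32,   # -ngl 32 (set to 0 without GPU acceleration)
    # rope_freq_base=10000, rope_freq_scale=0.5,  # doubled context, if needed
)

output = llm(
    'You are a story writing assistant.\n\n'
    '### Instruction:\nWrite a story about llamas\n\n### Response:\n',
    max_tokens=256,
    temperature=0.7,      # --temp 0.7
    repeat_penalty=1.1,   # --repeat_penalty 1.1
)
print(output['choices'][0]['text'])
```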
|
| 179 |
+
|
| 180 |
+
## How to run in `text-generation-webui`
|
| 181 |
+
|
| 182 |
+
Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).
|
| 183 |
+
|
| 184 |
+
<!-- footer start -->
|
| 185 |
+
<!-- 200823 -->
|
| 186 |
+
## Discord
|
| 187 |
+
|
| 188 |
+
For further support, and discussions on these models and AI in general, join us at:
|
| 189 |
+
|
| 190 |
+
[TheBloke AI's Discord server](https://discord.gg/theblokeai)
|
| 191 |
+
|
| 192 |
+
## Thanks, and how to contribute.
|
| 193 |
+
|
| 194 |
+
Thanks to the [chirper.ai](https://chirper.ai) team!
|
| 195 |
+
|
| 196 |
+
I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
|
| 197 |
+
|
| 198 |
+
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
|
| 199 |
+
|
| 200 |
+
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
|
| 201 |
+
|
| 202 |
+
* Patreon: https://patreon.com/TheBlokeAI
|
| 203 |
+
* Ko-Fi: https://ko-fi.com/TheBlokeAI
|
| 204 |
+
|
| 205 |
+
**Special thanks to**: Aemon Algiz.
|
| 206 |
+
|
| 207 |
+
**Patreon special mentions**: Russ Johnson, J, alfie_i, Alex, NimbleBox.ai, Chadd, Mandus, Nikolai Manek, Ken Nordquist, ya boyyy, Illia Dulskyi, Viktor Bowallius, vamX, Iucharbius, zynix, Magnesian, Clay Pascal, Pierre Kircher, Enrico Ros, Tony Hughes, Elle, Andrey, knownsqashed, Deep Realms, Jerry Meng, Lone Striker, Derek Yates, Pyrater, Mesiah Bishop, James Bentley, Femi Adebogun, Brandon Frisco, SuperWojo, Alps Aficionado, Michael Dempsey, Vitor Caleffi, Will Dee, Edmond Seymore, usrbinkat, LangChain4j, Kacper Wikieł, Luke Pendergrass, John Detwiler, theTransient, Nathan LeClaire, Tiffany J. Kim, biorpg, Eugene Pentland, Stanislav Ovsiannikov, Fred von Graf, terasurfer, Kalila, Dan Guido, Nitin Borwankar, 阿明, Ai Maven, John Villwock, Gabriel Puliatti, Stephen Murray, Asp the Wyvern, danny, Chris Smitley, ReadyPlayerEmma, S_X, Daniel P. Andersen, Olakabola, Jeffrey Morgan, Imad Khwaja, Caitlyn Gatomon, webtim, Alicia Loh, Trenton Dambrowitz, Swaroop Kallakuri, Erik Bjäreholt, Leonard Tan, Spiking Neurons AB, Luke @flexchar, Ajan Kanaga, Thomas Belote, Deo Leter, RoA, Willem Michiel, transmissions 11, subjectnull, Matthew Berman, Joseph William Delisle, David Ziegler, Michael Davis, Johann-Peter Hartmann, Talal Aujan, senxiiz, Artur Olbinski, Rainer Wilmers, Spencer Kim, Fen Risland, Cap'n Zoog, Rishabh Srivastava, Michael Levine, Geoffrey Montalvo, Sean Connelly, Alexandros Triantafyllidis, Pieter, Gabriel Tamborski, Sam, Subspace Studios, Junyu Yang, Pedro Madruga, Vadim, Cory Kujawski, K, Raven Klaugh, Randy H, Mano Prime, Sebastain Graf, Space Cruiser
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
Thank you to all my generous patrons and donaters!
|
| 211 |
+
|
| 212 |
+
And thank you again to a16z for their generous grant.
|
| 213 |
+
|
| 214 |
+
<!-- footer end -->
|
| 215 |
+
|
| 216 |
+
# Original model card: Gryphe's MythoMax L2 13B
|
| 217 |
+
|
| 218 |
+
An improved, potentially even perfected variant of MythoMix, my [MythoLogic-L2](https://huggingface.co/Gryphe/MythoLogic-L2-13b) and [Huginn](https://huggingface.co/The-Face-Of-Goonery/Huginn-13b-FP16) merge using a highly experimental tensor type merge technique. The main difference with MythoMix is that I allowed more of Huginn to intermingle with the single tensors located at the front and end of a model, resulting in increased coherency across the entire structure.
|
| 219 |
+
|
| 220 |
+
The script and the accompanying templates I used to produce both can [be found here](https://github.com/Gryphe/BlockMerge_Gradient/tree/main/YAML).
|
| 221 |
+
|
| 222 |
+
This model is proficient at both roleplaying and storywriting due to its unique nature.
|
| 223 |
+
|
| 224 |
+
Quantized models are available from TheBloke: [GGML](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML) - [GPTQ](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ) (You're the best!)
|
| 225 |
+
|
| 226 |
+
## Model details
|
| 227 |
+
|
| 228 |
+
The idea behind this merge is that each layer is composed of several tensors, which are in turn responsible for specific functions. Using MythoLogic-L2's robust understanding as its input and Huginn's extensive writing capability as its output seems to have resulted in a model that excels at both, confirming my theory. (More details to be released at a later time)
|
| 229 |
+
|
| 230 |
+
This type of merge cannot be illustrated, as each of its 363 tensors had a unique ratio applied to it. As with my prior merges, gradients were part of these ratios to further fine-tune its behaviour.
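As a rough illustration of the idea (a toy sketch, not Gryphe's BlockMerge_Gradient script; the linear ratio schedule here is an assumption for demonstration only):

```python
# Toy sketch: blend two models' tensors with per-tensor ratios that follow a
# gradient, so one model dominates the early tensors and the other the later ones.
import torch

def gradient_merge(tensors_a, tensors_b, start=0.9, end=0.1):
    '''Blend matching tensors with a ratio sliding linearly from start to end.'''
    n = len(tensors_a)
    merged = []
    for i, (a, b) in enumerate(zip(tensors_a, tensors_b)):
        ratio = start + (end - start) * (i / max(n - 1, 1))
        merged.append(ratio * a + (1.0 - ratio) * b)
    return merged

# Dummy tensors standing in for model weights:
a = [torch.randn(4, 4) for _ in range(5)]
b = [torch.randn(4, 4) for _ in range(5)]
print(len(gradient_merge(a, b)))  # 5 blended tensors
```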
|
| 231 |
+
|
| 232 |
+
## Prompt Format
|
| 233 |
+
|
| 234 |
+
This model primarily uses Alpaca formatting, so for optimal model performance, use:
|
| 235 |
+
```
|
| 236 |
+
<System prompt/Character Card>
|
| 237 |
+
|
| 238 |
+
### Instruction:
|
| 239 |
+
Your instruction or question here.
|
| 240 |
+
For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.
|
| 241 |
+
|
| 242 |
+
### Response:
|
| 243 |
+
```
|
| 244 |
+
|
| 245 |
+
---
|
| 246 |
+
license: other
|
| 247 |
+
---
|
| 248 |
+
","{""id"": ""TheBloke/MythoMax-L2-13B-GGML"", ""author"": ""TheBloke"", ""sha"": ""c7300d62a6113791e9c83a2658d6e5389810256f"", ""last_modified"": ""2023-09-27 13:01:19+00:00"", ""created_at"": ""2023-08-11 07:27:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 22, ""downloads_all_time"": null, ""likes"": 83, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""en"", ""base_model:Gryphe/MythoMax-L2-13b"", ""base_model:finetune:Gryphe/MythoMax-L2-13b"", ""license:llama2"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlicense: llama2\nmodel_name: MythoMax L2 13B\ninference: false\nmodel_creator: Gryphe\nmodel_link: https://huggingface.co/Gryphe/MythoMax-L2-13b\nmodel_type: llama\nquantized_by: TheBloke"", ""widget_data"": null, ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q2_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q3_K_L.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q3_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q3_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q4_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q4_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q4_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q4_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q5_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q5_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q5_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q5_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q6_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mythomax-l2-13b.ggmlv3.q8_0.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 13:01:19+00:00"", ""cardData"": ""base_model: Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlicense: llama2\nmodel_name: MythoMax L2 13B\ninference: false\nmodel_creator: Gryphe\nmodel_link: https://huggingface.co/Gryphe/MythoMax-L2-13b\nmodel_type: llama\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, 
""_id"": ""64d5e2dc3ca2924d6e61148a"", ""modelId"": ""TheBloke/MythoMax-L2-13B-GGML"", ""usedStorage"": 116594290432}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=TheBloke/MythoMax-L2-13B-GGML&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FMythoMax-L2-13B-GGML%5D(%2FTheBloke%2FMythoMax-L2-13B-GGML)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 249 |
+
theNovaAI/Hypernova-experimental,"---
|
| 250 |
+
language:
|
| 251 |
+
- en
|
| 252 |
+
license: cc-by-nc-sa-4.0
|
| 253 |
+
library_name: transformers
|
| 254 |
+
base_model:
|
| 255 |
+
- Undi95/Emerald-13B
|
| 256 |
+
- Gryphe/MythoMax-L2-13b
|
| 257 |
+
inference: false
|
| 258 |
+
---
|
| 259 |
+
## Hypernova-experimental
|
| 260 |
+
Tried some new stuff this time around. Very different outcome than I expected.
|
| 261 |
+
This is an experimental model that was created for the development of NovaAI.
|
| 262 |
+
|
| 263 |
+
Good at chatting and some RP. Sometimes gets characters mixed up. Can occasionally struggle with context.
|
| 264 |
+
|
| 265 |
+
Quantized model here: [theNovaAI/Hypernova-experimental-GPTQ](https://huggingface.co/theNovaAI/Hypernova-experimental-GPTQ)
|
| 266 |
+
|
| 267 |
+
## Prompt Template: Alpaca
|
| 268 |
+
|
| 269 |
+
```
|
| 270 |
+
Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
| 271 |
+
|
| 272 |
+
### Instruction:
|
| 273 |
+
{prompt}
|
| 274 |
+
|
| 275 |
+
### Response:
|
| 276 |
+
```
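A minimal way to apply this template with plain transformers (a sketch, not from the original card; the generation settings and example instruction are illustrative only):

```python
# Sketch: run Hypernova-experimental with the Alpaca template above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = 'theNovaAI/Hypernova-experimental'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map='auto')

prompt = (
    'Below is an instruction that describes a task. '
    'Write a response that appropriately completes the request.\n\n'
    '### Instruction:\nWrite a short greeting from a friendly wizard.\n\n### Response:\n'
)
inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, temperature=0.7, do_sample=True)
# Print only the newly generated tokens, not the echoed prompt.
print(tokenizer.decode(outputs[0][inputs['input_ids'].shape[-1]:], skip_special_tokens=True))
```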
|
| 277 |
+
|
| 278 |
+
### Models Merged
|
| 279 |
+
|
| 280 |
+
The following models were included in the merge:
|
| 281 |
+
* [Undi95/Emerald-13B](https://huggingface.co/Undi95/Emerald-13B)
|
| 282 |
+
* [Gryphe/MythoMax-L2-13b](https://huggingface.co/Gryphe/MythoMax-L2-13b)
|
| 283 |
+
|
| 284 |
+
Some finetuning done as well","{""id"": ""theNovaAI/Hypernova-experimental"", ""author"": ""theNovaAI"", ""sha"": ""df1e96769b09dbc621c3123f68d914cb4071af12"", ""last_modified"": ""2024-08-11 06:02:29+00:00"", ""created_at"": ""2024-05-01 02:50:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""en"", ""base_model:Gryphe/MythoMax-L2-13b"", ""base_model:finetune:Gryphe/MythoMax-L2-13b"", ""license:cc-by-nc-sa-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Undi95/Emerald-13B\n- Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-sa-4.0\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00014.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""Darok/Featherless-Feud"", ""emekaboris/try-this-model"", ""SC999/NV_Nemotron"", ""JackHoltone/try-this-model"", ""k11112/try-this-model""], ""safetensors"": {""parameters"": {""F16"": 13015864320}, ""total"": 13015864320}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-11 06:02:29+00:00"", ""cardData"": ""base_model:\n- Undi95/Emerald-13B\n- Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-sa-4.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6631ade1e0e505bd2f194d8d"", ""modelId"": ""theNovaAI/Hypernova-experimental"", ""usedStorage"": 26032270403}",1,,0,,0,"https://huggingface.co/mradermacher/Hypernova-experimental-GGUF, https://huggingface.co/mradermacher/Hypernova-experimental-i1-GGUF, https://huggingface.co/featherless-ai-quants/theNovaAI-Hypernova-experimental-GGUF",3,,0,"Darok/Featherless-Feud, JackHoltone/try-this-model, SC999/NV_Nemotron, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=theNovaAI/Hypernova-experimental&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BtheNovaAI%2FHypernova-experimental%5D(%2FtheNovaAI%2FHypernova-experimental)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, k11112/try-this-model",7
|
NuExtract_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,653 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
numind/NuExtract,"---
|
| 3 |
+
license: mit
|
| 4 |
+
language:
|
| 5 |
+
- en
|
| 6 |
+
base_model: microsoft/Phi-3-mini-4k-instruct
|
| 7 |
+
new_version: numind/NuExtract-v1.5
|
| 8 |
+
---
|
| 9 |
+
> ⚠️ **_NOTE:_** This model is outdated. Find the updated version [here](https://huggingface.co/numind/NuExtract-v1.5)
|
| 10 |
+
|
| 11 |
+
# Structure Extraction Model by NuMind 🔥
|
| 12 |
+
|
| 13 |
+
NuExtract is a version of [phi-3-mini](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct), fine-tuned on a private high-quality synthetic dataset for information extraction.
|
| 14 |
+
To use the model, provide an input text (less than 2000 tokens) and a JSON template describing the information you need to extract.
|
| 15 |
+
|
| 16 |
+
Note: This model is purely extractive, so all text output by the model is present as is in the original text. You can also provide an example of output formatting to help the model understand your task more precisely.
|
| 17 |
+
|
| 18 |
+
Try it here: https://huggingface.co/spaces/numind/NuExtract
|
| 19 |
+
|
| 20 |
+
We also provide tiny (0.5B) and large (7B) versions of this model: [NuExtract-tiny](https://huggingface.co/numind/NuExtract-tiny) and [NuExtract-large](https://huggingface.co/numind/NuExtract-large)
|
| 21 |
+
|
| 22 |
+
**Checkout other models by NuMind:**
|
| 23 |
+
* SOTA Zero-shot NER Model [NuNER Zero](https://huggingface.co/numind/NuNER_Zero)
|
| 24 |
+
* SOTA Multilingual Entity Recognition Foundation Model: [link](https://huggingface.co/numind/entity-recognition-multilingual-general-sota-v1)
|
| 25 |
+
* SOTA Sentiment Analysis Foundation Model: [English](https://huggingface.co/numind/generic-sentiment-v1), [Multilingual](https://huggingface.co/numind/generic-sentiment-multi-v1)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
## Benchmark
|
| 29 |
+
|
| 30 |
+
Zero-shot benchmark (to be released soon):
|
| 31 |
+
|
| 32 |
+
<p align=""left"">
|
| 33 |
+
<img src=""result.png"" width=""600"">
|
| 34 |
+
</p>
|
| 35 |
+
|
| 36 |
+
Fine-tuning benchmark (see the blog post):
|
| 37 |
+
|
| 38 |
+
<p align=""left"">
|
| 39 |
+
<img src=""result_ft.png"" width=""600"">
|
| 40 |
+
</p>
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
## Usage
|
| 44 |
+
|
| 45 |
+
To use the model:
|
| 46 |
+
|
| 47 |
+
```python
|
| 48 |
+
import json
import torch  # needed for torch.bfloat16 below
|
| 49 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def predict_NuExtract(model, tokenizer, text, schema, example=["""", """", """"]):
|
| 53 |
+
schema = json.dumps(json.loads(schema), indent=4)
|
| 54 |
+
input_llm = ""<|input|>\n### Template:\n"" + schema + ""\n""
|
| 55 |
+
for i in example:
|
| 56 |
+
if i != """":
|
| 57 |
+
input_llm += ""### Example:\n""+ json.dumps(json.loads(i), indent=4)+""\n""
|
| 58 |
+
|
| 59 |
+
input_llm += ""### Text:\n""+text +""\n<|output|>\n""
|
| 60 |
+
input_ids = tokenizer(input_llm, return_tensors=""pt"",truncation = True, max_length=4000).to(""cuda"")
|
| 61 |
+
|
| 62 |
+
output = tokenizer.decode(model.generate(**input_ids)[0], skip_special_tokens=True)
|
| 63 |
+
return output.split(""<|output|>"")[1].split(""<|end-output|>"")[0]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# We recommend using bf16 as it results in negligible performance loss
|
| 67 |
+
model = AutoModelForCausalLM.from_pretrained(""numind/NuExtract"", torch_dtype=torch.bfloat16, trust_remote_code=True)
|
| 68 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"", trust_remote_code=True)
|
| 69 |
+
|
| 70 |
+
model.to(""cuda"")
|
| 71 |
+
|
| 72 |
+
model.eval()
|
| 73 |
+
|
| 74 |
+
text = """"""We introduce Mistral 7B, a 7–billion-parameter language model engineered for
|
| 75 |
+
superior performance and efficiency. Mistral 7B outperforms the best open 13B
|
| 76 |
+
model (Llama 2) across all evaluated benchmarks, and the best released 34B
|
| 77 |
+
model (Llama 1) in reasoning, mathematics, and code generation. Our model
|
| 78 |
+
leverages grouped-query attention (GQA) for faster inference, coupled with sliding
|
| 79 |
+
window attention (SWA) to effectively handle sequences of arbitrary length with a
|
| 80 |
+
reduced inference cost. We also provide a model fine-tuned to follow instructions,
|
| 81 |
+
Mistral 7B – Instruct, that surpasses Llama 2 13B – chat model both on human and
|
| 82 |
+
automated benchmarks. Our models are released under the Apache 2.0 license.
|
| 83 |
+
Code: https://github.com/mistralai/mistral-src
|
| 84 |
+
Webpage: https://mistral.ai/news/announcing-mistral-7b/""""""
|
| 85 |
+
|
| 86 |
+
schema = """"""{
|
| 87 |
+
""Model"": {
|
| 88 |
+
""Name"": """",
|
| 89 |
+
""Number of parameters"": """",
|
| 90 |
+
""Number of max token"": """",
|
| 91 |
+
""Architecture"": []
|
| 92 |
+
},
|
| 93 |
+
""Usage"": {
|
| 94 |
+
""Use case"": [],
|
| 95 |
+
""Licence"": """"
|
| 96 |
+
}
|
| 97 |
+
}""""""
|
| 98 |
+
|
| 99 |
+
prediction = predict_NuExtract(model, tokenizer, text, schema, example=["""","""",""""])
|
| 100 |
+
print(prediction)
|
| 101 |
+
|
| 102 |
+
```","{""id"": ""numind/NuExtract"", ""author"": ""numind"", ""sha"": ""1b6c9d9c995fac939d0c663125d33cca79d9101d"", ""last_modified"": ""2024-10-17 15:26:25+00:00"", ""created_at"": ""2024-05-31 09:53:13+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 887, ""downloads_all_time"": null, ""likes"": 220, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3"", ""text-generation"", ""conversational"", ""custom_code"", ""en"", ""base_model:microsoft/Phi-3-mini-4k-instruct"", ""base_model:finetune:microsoft/Phi-3-mini-4k-instruct"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-mini-4k-instruct\nlanguage:\n- en\nlicense: mit\nnew_version: numind/NuExtract-v1.5"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-4k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-4k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='result_ft.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, 
blob_id=None, lfs=None)""], ""spaces"": [""numind/NuExtract"", ""darshil3011/numind-NuExtract""], ""safetensors"": {""parameters"": {""F32"": 3821079552}, ""total"": 3821079552}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-17 15:26:25+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-mini-4k-instruct\nlanguage:\n- en\nlicense: mit\nnew_version: numind/NuExtract-v1.5"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66599e09e71d3742325471bf"", ""modelId"": ""numind/NuExtract"", ""usedStorage"": 15284840579}",0,"https://huggingface.co/PrunaAI/numind-NuExtract-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/numind-NuExtract-HQQ-4bit-smashed, https://huggingface.co/PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed, https://huggingface.co/PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed, https://huggingface.co/PrunaAI/numind-NuExtract-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed, https://huggingface.co/marquesafonso/NuExtract-openvino-8bit",7,,0,"https://huggingface.co/chrisseiler96/NuExtract-Q4_K_M-GGUF, https://huggingface.co/PrunaAI/numind-NuExtract-bnb-4bit-smashed, https://huggingface.co/nvhf/NuExtract-Q6_K-GGUF, https://huggingface.co/mradermacher/NuExtract-GGUF, https://huggingface.co/mradermacher/NuExtract-i1-GGUF",5,,0,"darshil3011/numind-NuExtract, huggingface/InferenceSupport/discussions/new?title=numind/NuExtract&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnumind%2FNuExtract%5D(%2Fnumind%2FNuExtract)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, numind/NuExtract",3
|
| 103 |
+
PrunaAI/numind-NuExtract-HQQ-2bit-smashed,"---
|
| 104 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 105 |
+
base_model: numind/NuExtract
|
| 106 |
+
metrics:
|
| 107 |
+
- memory_disk
|
| 108 |
+
- memory_inference
|
| 109 |
+
- inference_latency
|
| 110 |
+
- inference_throughput
|
| 111 |
+
- inference_CO2_emissions
|
| 112 |
+
- inference_energy_consumption
|
| 113 |
+
tags:
|
| 114 |
+
- pruna-ai
|
| 115 |
+
---
|
| 116 |
+
<!-- header start -->
|
| 117 |
+
<!-- 200823 -->
|
| 118 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 119 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 120 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 121 |
+
</a>
|
| 122 |
+
</div>
|
| 123 |
+
<!-- header end -->
|
| 124 |
+
|
| 125 |
+
[](https://twitter.com/PrunaAI)
|
| 126 |
+
[](https://github.com/PrunaAI)
|
| 127 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 128 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 129 |
+
|
| 130 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 131 |
+
|
| 132 |
+
- Give a thumbs up if you like this model!
|
| 133 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 134 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 135 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
|
| 136 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 137 |
+
|
| 138 |
+
## Results
|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
|
| 142 |
+
**Frequently Asked Questions**
|
| 143 |
+
- ***How does the compression work?*** The model is compressed with hqq.
|
| 144 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 145 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the model directly in your use-case conditions to find out whether the smashed model benefits you.
|
| 146 |
+
- ***What is the model format?*** We use safetensors.
|
| 147 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 148 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 149 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 150 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 151 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
| 152 |
+
|
| 153 |
+
## Setup
|
| 154 |
+
|
| 155 |
+
You can run the smashed model with these steps:
|
| 156 |
+
|
| 157 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the Python, CUDA, and transformers versions.
|
| 158 |
+
1. Make sure that you have installed quantization related packages.
|
| 159 |
+
```bash
|
| 160 |
+
pip install hqq
|
| 161 |
+
```
|
| 162 |
+
2. Load & run the model.
|
| 163 |
+
```python
|
| 164 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 165 |
+
from hqq.engine.hf import HQQModelForCausalLM
|
| 166 |
+
from hqq.models.hf.base import AutoHQQHFModel
|
| 167 |
+
|
| 168 |
+
try:
|
| 169 |
+
model = HQQModelForCausalLM.from_quantized(""PrunaAI/numind-NuExtract-HQQ-2bit-smashed"", device_map='auto')
|
| 170 |
+
except:
|
| 171 |
+
model = AutoHQQHFModel.from_quantized(""PrunaAI/numind-NuExtract-HQQ-2bit-smashed"")
|
| 172 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 173 |
+
|
| 174 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 175 |
+
|
| 176 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 177 |
+
tokenizer.decode(outputs[0])
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
## Configurations
|
| 181 |
+
|
| 182 |
+
The configuration info is in `smash_config.json`.
|
| 183 |
+
|
| 184 |
+
## Credits & License
|
| 185 |
+
|
| 186 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
|
| 187 |
+
|
| 188 |
+
## Want to compress other models?
|
| 189 |
+
|
| 190 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 191 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""fa59d77b0e9f65f260b44f4d19dbc62cb3009bcb"", ""last_modified"": ""2024-07-16 05:59:50+00:00"", ""created_at"": ""2024-07-16 05:59:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""phi3"", ""text-generation"", ""pruna-ai"", ""conversational"", ""custom_code"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-4k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-4k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-16 05:59:50+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- 
inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66960c2c5b049173bc153a00"", ""modelId"": ""PrunaAI/numind-NuExtract-HQQ-2bit-smashed"", ""usedStorage"": 1386030128}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-HQQ-2bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 192 |
+
PrunaAI/numind-NuExtract-HQQ-4bit-smashed,"---
|
| 193 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 194 |
+
base_model: numind/NuExtract
|
| 195 |
+
metrics:
|
| 196 |
+
- memory_disk
|
| 197 |
+
- memory_inference
|
| 198 |
+
- inference_latency
|
| 199 |
+
- inference_throughput
|
| 200 |
+
- inference_CO2_emissions
|
| 201 |
+
- inference_energy_consumption
|
| 202 |
+
tags:
|
| 203 |
+
- pruna-ai
|
| 204 |
+
---
|
| 205 |
+
<!-- header start -->
|
| 206 |
+
<!-- 200823 -->
|
| 207 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 208 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 209 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 210 |
+
</a>
|
| 211 |
+
</div>
|
| 212 |
+
<!-- header end -->
|
| 213 |
+
|
| 214 |
+
[](https://twitter.com/PrunaAI)
|
| 215 |
+
[](https://github.com/PrunaAI)
|
| 216 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 217 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 218 |
+
|
| 219 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 220 |
+
|
| 221 |
+
- Give a thumbs up if you like this model!
|
| 222 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 223 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 224 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
|
| 225 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 226 |
+
|
| 227 |
+
## Results
|
| 228 |
+
|
| 229 |
+

|
| 230 |
+
|
| 231 |
+
**Frequently Asked Questions**
|
| 232 |
+
- ***How does the compression work?*** The model is compressed with hqq.
|
| 233 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 234 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the model directly in your use-case conditions to find out whether the smashed model benefits you.
|
| 235 |
+
- ***What is the model format?*** We use safetensors.
|
| 236 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 237 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 238 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 239 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 240 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
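As a rough illustration of the difference (not Pruna's actual benchmarking code), here is a minimal timing sketch that assumes PyTorch, a CUDA device, and the `model`/`input_ids` objects created in the Setup section below:

```python
import time

import torch

def time_generation(model, input_ids, sync: bool = True):
    # Make sure no earlier GPU work leaks into the measurement.
    torch.cuda.synchronize()
    start = time.perf_counter()
    outputs = model.generate(input_ids, max_new_tokens=216)
    if sync:
        # 'Sync' style: wait for every queued GPU kernel to finish before stopping the clock.
        torch.cuda.synchronize()
    # 'Async' style (sync=False): stop as soon as the output handle is usable on the CPU side.
    return outputs, time.perf_counter() - start
```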
| 241 |
+
|
| 242 |
+
## Setup
|
| 243 |
+
|
| 244 |
+
You can run the smashed model with these steps:
|
| 245 |
+
|
| 246 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the python, cuda, and transformers versions.
|
| 247 |
+
1. Make sure that you have installed quantization related packages.
|
| 248 |
+
```bash
|
| 249 |
+
pip install hqq
|
| 250 |
+
```
|
| 251 |
+
2. Load & run the model.
|
| 252 |
+
```python
|
| 253 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 254 |
+
from hqq.engine.hf import HQQModelForCausalLM
|
| 255 |
+
from hqq.models.hf.base import AutoHQQHFModel
|
| 256 |
+
|
| 257 |
+
try:
|
| 258 |
+
model = HQQModelForCausalLM.from_quantized(""PrunaAI/numind-NuExtract-HQQ-4bit-smashed"", device_map='auto')
|
| 259 |
+
except Exception:
|
| 260 |
+
model = AutoHQQHFModel.from_quantized(""PrunaAI/numind-NuExtract-HQQ-4bit-smashed"")
|
| 261 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 262 |
+
|
| 263 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 264 |
+
|
| 265 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 266 |
+
tokenizer.decode(outputs[0])
|
| 267 |
+
```
|
| 268 |
+
|
| 269 |
+
## Configurations
|
| 270 |
+
|
| 271 |
+
The configuration info is in `smash_config.json`.
|
| 272 |
+
|
| 273 |
+
## Credits & License
|
| 274 |
+
|
| 275 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
|
| 276 |
+
|
| 277 |
+
## Want to compress other models?
|
| 278 |
+
|
| 279 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 280 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-HQQ-4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""a608cf60d4aaee0dfb34ccaea641da83ab0231a5"", ""last_modified"": ""2024-07-16 06:00:22+00:00"", ""created_at"": ""2024-07-16 05:59:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""phi3"", ""text-generation"", ""pruna-ai"", ""conversational"", ""custom_code"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-4k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-4k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-16 06:00:22+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- 
inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66960c2e461de4eea5a5b78b"", ""modelId"": ""PrunaAI/numind-NuExtract-HQQ-4bit-smashed"", ""usedStorage"": 2291999792}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-HQQ-4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-HQQ-4bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-HQQ-4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 281 |
+
PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed,"---
|
| 282 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 283 |
+
base_model: numind/NuExtract
|
| 284 |
+
metrics:
|
| 285 |
+
- memory_disk
|
| 286 |
+
- memory_inference
|
| 287 |
+
- inference_latency
|
| 288 |
+
- inference_throughput
|
| 289 |
+
- inference_CO2_emissions
|
| 290 |
+
- inference_energy_consumption
|
| 291 |
+
tags:
|
| 292 |
+
- pruna-ai
|
| 293 |
+
---
|
| 294 |
+
<!-- header start -->
|
| 295 |
+
<!-- 200823 -->
|
| 296 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 297 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 298 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 299 |
+
</a>
|
| 300 |
+
</div>
|
| 301 |
+
<!-- header end -->
|
| 302 |
+
|
| 303 |
+
[](https://twitter.com/PrunaAI)
|
| 304 |
+
[](https://github.com/PrunaAI)
|
| 305 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 306 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 307 |
+
|
| 308 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 309 |
+
|
| 310 |
+
- Give a thumbs up if you like this model!
|
| 311 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 312 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 313 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
|
| 314 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 315 |
+
|
| 316 |
+
## Results
|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
|
| 320 |
+
**Frequently Asked Questions**
|
| 321 |
+
- ***How does the compression work?*** The model is compressed with quanto.
|
| 322 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 323 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the benchmarks directly in your use-case conditions to know whether the smashed model can benefit you.
|
| 324 |
+
- ***What is the model format?*** We use safetensors.
|
| 325 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 326 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 327 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 328 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 329 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
| 330 |
+
|
| 331 |
+
## Setup
|
| 332 |
+
|
| 333 |
+
You can run the smashed model with these steps:
|
| 334 |
+
|
| 335 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the python, cuda, and transformers versions.
|
| 336 |
+
1. Make sure that you have installed quantization related packages.
|
| 337 |
+
```bash
|
| 338 |
+
pip install quanto
|
| 339 |
+
```
|
| 340 |
+
2. Load & run the model.
|
| 341 |
+
```python
|
| 342 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 343 |
+
# placeholder left by the card template for any additional imports (e.g. from quanto) that your setup may need
|
| 344 |
+
|
| 345 |
+
model = AutoModelForCausalLM.from_pretrained(""PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed"", trust_remote_code=True, device_map='auto')
|
| 346 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 347 |
+
|
| 348 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 349 |
+
|
| 350 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 351 |
+
tokenizer.decode(outputs[0])
|
| 352 |
+
```
|
| 353 |
+
|
| 354 |
+
## Configurations
|
| 355 |
+
|
| 356 |
+
The configuration info is in `smash_config.json`.
|
| 357 |
+
|
| 358 |
+
## Credits & License
|
| 359 |
+
|
| 360 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
|
| 361 |
+
|
| 362 |
+
## Want to compress other models?
|
| 363 |
+
|
| 364 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 365 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""70c03c5908e8a609df96092174c9f20d10d1cc0b"", ""last_modified"": ""2024-07-19 09:20:38+00:00"", ""created_at"": ""2024-07-16 05:59:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:20:38+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66960c4197eb9fe02d8b4bfe"", ""modelId"": ""PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed"", ""usedStorage"": 
15285057697}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-QUANTO-int4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-QUANTO-int4bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-QUANTO-int4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 366 |
+
PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed,"---
|
| 367 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 368 |
+
base_model: numind/NuExtract
|
| 369 |
+
metrics:
|
| 370 |
+
- memory_disk
|
| 371 |
+
- memory_inference
|
| 372 |
+
- inference_latency
|
| 373 |
+
- inference_throughput
|
| 374 |
+
- inference_CO2_emissions
|
| 375 |
+
- inference_energy_consumption
|
| 376 |
+
tags:
|
| 377 |
+
- pruna-ai
|
| 378 |
+
---
|
| 379 |
+
<!-- header start -->
|
| 380 |
+
<!-- 200823 -->
|
| 381 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 382 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 383 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 384 |
+
</a>
|
| 385 |
+
</div>
|
| 386 |
+
<!-- header end -->
|
| 387 |
+
|
| 388 |
+
[](https://twitter.com/PrunaAI)
|
| 389 |
+
[](https://github.com/PrunaAI)
|
| 390 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 391 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 392 |
+
|
| 393 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 394 |
+
|
| 395 |
+
- Give a thumbs up if you like this model!
|
| 396 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 397 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 398 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
|
| 399 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 400 |
+
|
| 401 |
+
## Results
|
| 402 |
+
|
| 403 |
+

|
| 404 |
+
|
| 405 |
+
**Frequently Asked Questions**
|
| 406 |
+
- ***How does the compression work?*** The model is compressed with quanto.
|
| 407 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 408 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the benchmarks directly in your use-case conditions to know whether the smashed model can benefit you.
|
| 409 |
+
- ***What is the model format?*** We use safetensors.
|
| 410 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 411 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 412 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 413 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 414 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
| 415 |
+
|
| 416 |
+
## Setup
|
| 417 |
+
|
| 418 |
+
You can run the smashed model with these steps:
|
| 419 |
+
|
| 420 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the python, cuda, and transformers versions.
|
| 421 |
+
1. Make sure that you have installed quantization related packages.
|
| 422 |
+
```bash
|
| 423 |
+
pip install quanto
|
| 424 |
+
```
|
| 425 |
+
2. Load & run the model.
|
| 426 |
+
```python
|
| 427 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 428 |
+
# placeholder left by the card template for any additional imports (e.g. from quanto) that your setup may need
|
| 429 |
+
|
| 430 |
+
model = AutoModelForCausalLM.from_pretrained(""PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed"", trust_remote_code=True, device_map='auto')
|
| 431 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 432 |
+
|
| 433 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 434 |
+
|
| 435 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 436 |
+
tokenizer.decode(outputs[0])
|
| 437 |
+
```
|
| 438 |
+
|
| 439 |
+
## Configurations
|
| 440 |
+
|
| 441 |
+
The configuration info is in `smash_config.json`.
|
| 442 |
+
|
| 443 |
+
## Credits & License
|
| 444 |
+
|
| 445 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
|
| 446 |
+
|
| 447 |
+
## Want to compress other models?
|
| 448 |
+
|
| 449 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 450 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""8878fc451c9732e61b461a495f7f1b98f222cada"", ""last_modified"": ""2024-07-19 09:30:46+00:00"", ""created_at"": ""2024-07-16 05:59:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:30:46+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66960c41d68bb542681775a3"", ""modelId"": ""PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed"", ""usedStorage"": 
15285057697}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-QUANTO-int2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-QUANTO-int2bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-QUANTO-int2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 451 |
+
PrunaAI/numind-NuExtract-HQQ-1bit-smashed,"---
|
| 452 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 453 |
+
base_model: numind/NuExtract
|
| 454 |
+
metrics:
|
| 455 |
+
- memory_disk
|
| 456 |
+
- memory_inference
|
| 457 |
+
- inference_latency
|
| 458 |
+
- inference_throughput
|
| 459 |
+
- inference_CO2_emissions
|
| 460 |
+
- inference_energy_consumption
|
| 461 |
+
tags:
|
| 462 |
+
- pruna-ai
|
| 463 |
+
---
|
| 464 |
+
<!-- header start -->
|
| 465 |
+
<!-- 200823 -->
|
| 466 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 467 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 468 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 469 |
+
</a>
|
| 470 |
+
</div>
|
| 471 |
+
<!-- header end -->
|
| 472 |
+
|
| 473 |
+
[](https://twitter.com/PrunaAI)
|
| 474 |
+
[](https://github.com/PrunaAI)
|
| 475 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 476 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 477 |
+
|
| 478 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 479 |
+
|
| 480 |
+
- Give a thumbs up if you like this model!
|
| 481 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 482 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 483 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
|
| 484 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 485 |
+
|
| 486 |
+
## Results
|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
|
| 490 |
+
**Frequently Asked Questions**
|
| 491 |
+
- ***How does the compression work?*** The model is compressed with hqq.
|
| 492 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 493 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the benchmarks directly in your use-case conditions to know whether the smashed model can benefit you.
|
| 494 |
+
- ***What is the model format?*** We use safetensors.
|
| 495 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 496 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 497 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 498 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 499 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
| 500 |
+
|
| 501 |
+
## Setup
|
| 502 |
+
|
| 503 |
+
You can run the smashed model with these steps:
|
| 504 |
+
|
| 505 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the python, cuda, and transformers versions.
|
| 506 |
+
1. Make sure that you have installed quantization related packages.
|
| 507 |
+
```bash
|
| 508 |
+
pip install hqq
|
| 509 |
+
```
|
| 510 |
+
2. Load & run the model.
|
| 511 |
+
```python
|
| 512 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 513 |
+
from hqq.engine.hf import HQQModelForCausalLM
|
| 514 |
+
from hqq.models.hf.base import AutoHQQHFModel
|
| 515 |
+
|
| 516 |
+
try:
|
| 517 |
+
model = HQQModelForCausalLM.from_quantized(""PrunaAI/numind-NuExtract-HQQ-1bit-smashed"", device_map='auto')
|
| 518 |
+
except Exception:
|
| 519 |
+
model = AutoHQQHFModel.from_quantized(""PrunaAI/numind-NuExtract-HQQ-1bit-smashed"")
|
| 520 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 521 |
+
|
| 522 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 523 |
+
|
| 524 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 525 |
+
tokenizer.decode(outputs[0])
|
| 526 |
+
```
|
| 527 |
+
|
| 528 |
+
## Configurations
|
| 529 |
+
|
| 530 |
+
The configuration info is in `smash_config.json`.
|
| 531 |
+
|
| 532 |
+
## Credits & License
|
| 533 |
+
|
| 534 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
|
| 535 |
+
|
| 536 |
+
## Want to compress other models?
|
| 537 |
+
|
| 538 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 539 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""2d7b08ced9ac47f4b85b2c87f5215312c1646756"", ""last_modified"": ""2024-07-16 06:00:28+00:00"", ""created_at"": ""2024-07-16 05:59:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""phi3"", ""text-generation"", ""pruna-ai"", ""conversational"", ""custom_code"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-4k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-4k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-16 06:00:28+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- 
inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66960c59f17d700f3796d80d"", ""modelId"": ""PrunaAI/numind-NuExtract-HQQ-1bit-smashed"", ""usedStorage"": 933045296}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-HQQ-1bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 540 |
+
PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed,"---
|
| 541 |
+
thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
|
| 542 |
+
base_model: numind/NuExtract
|
| 543 |
+
metrics:
|
| 544 |
+
- memory_disk
|
| 545 |
+
- memory_inference
|
| 546 |
+
- inference_latency
|
| 547 |
+
- inference_throughput
|
| 548 |
+
- inference_CO2_emissions
|
| 549 |
+
- inference_energy_consumption
|
| 550 |
+
tags:
|
| 551 |
+
- pruna-ai
|
| 552 |
+
---
|
| 553 |
+
<!-- header start -->
|
| 554 |
+
<!-- 200823 -->
|
| 555 |
+
<div style=""width: auto; margin-left: auto; margin-right: auto"">
|
| 556 |
+
<a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
|
| 557 |
+
<img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
|
| 558 |
+
</a>
|
| 559 |
+
</div>
|
| 560 |
+
<!-- header end -->
|
| 561 |
+
|
| 562 |
+
[](https://twitter.com/PrunaAI)
|
| 563 |
+
[](https://github.com/PrunaAI)
|
| 564 |
+
[](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
|
| 565 |
+
[](https://discord.gg/rskEr4BZJx)
|
| 566 |
+
|
| 567 |
+
# Simply make AI models cheaper, smaller, faster, and greener!
|
| 568 |
+
|
| 569 |
+
- Give a thumbs up if you like this model!
|
| 570 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 571 |
+
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 572 |
+
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
|
| 573 |
+
- Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
|
| 574 |
+
|
| 575 |
+
## Results
|
| 576 |
+
|
| 577 |
+

|
| 578 |
+
|
| 579 |
+
**Frequently Asked Questions**
|
| 580 |
+
- ***How does the compression work?*** The model is compressed with quanto.
|
| 581 |
+
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
|
| 582 |
+
- ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is compared directly to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the benchmarks directly in your use-case conditions to know whether the smashed model can benefit you.
|
| 583 |
+
- ***What is the model format?*** We use safetensors.
|
| 584 |
+
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
|
| 585 |
+
- ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
|
| 586 |
+
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
|
| 587 |
+
- ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
|
| 588 |
+
- ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
|
| 589 |
+
|
| 590 |
+
## Setup
|
| 591 |
+
|
| 592 |
+
You can run the smashed model with these steps:
|
| 593 |
+
|
| 594 |
+
0. Check that the requirements from the original repo numind/NuExtract are installed. In particular, check the python, cuda, and transformers versions.
|
| 595 |
+
1. Make sure that you have installed quantization related packages.
|
| 596 |
+
```bash
|
| 597 |
+
pip install quanto
|
| 598 |
+
```
|
| 599 |
+
2. Load & run the model.
|
| 600 |
+
```python
|
| 601 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 602 |
+
# placeholder left by the card template for any additional imports (e.g. from quanto) that your setup may need
|
| 603 |
+
|
| 604 |
+
model = AutoModelForCausalLM.from_pretrained(""PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed"", trust_remote_code=True, device_map='auto')
|
| 605 |
+
tokenizer = AutoTokenizer.from_pretrained(""numind/NuExtract"")
|
| 606 |
+
|
| 607 |
+
input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
|
| 608 |
+
|
| 609 |
+
outputs = model.generate(input_ids, max_new_tokens=216)
|
| 610 |
+
tokenizer.decode(outputs[0])
|
| 611 |
+
```
|
| 612 |
+
|
| 613 |
+
## Configurations
|
| 614 |
+
|
| 615 |
+
The configuration info is in `smash_config.json`.
|
| 616 |
+
|
| 617 |
+
## Credits & License
|
| 618 |
+
|
| 619 |
+
The license of the smashed model follows the license of the original model. Please check the license of the original model numind/NuExtract, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
|
| 620 |
+
|
| 621 |
+
## Want to compress other models?
|
| 622 |
+
|
| 623 |
+
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
|
| 624 |
+
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""3763d11f3d095fda9c2cce1f573331650f6a1f1b"", ""last_modified"": ""2024-07-19 09:23:07+00:00"", ""created_at"": ""2024-07-16 06:05:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:23:07+00:00"", ""cardData"": ""base_model: numind/NuExtract\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66960d9acda586f73209af87"", ""modelId"": ""PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed"", ""usedStorage"": 
15284557846}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/numind-NuExtract-QUANTO-float8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fnumind-NuExtract-QUANTO-float8bit-smashed%5D(%2FPrunaAI%2Fnumind-NuExtract-QUANTO-float8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 625 |
+
marquesafonso/NuExtract-openvino-8bit,"---
|
| 626 |
+
base_model: numind/NuExtract
|
| 627 |
+
language:
|
| 628 |
+
- en
|
| 629 |
+
license: mit
|
| 630 |
+
tags:
|
| 631 |
+
- openvino
|
| 632 |
+
- nncf
|
| 633 |
+
- 8-bit
|
| 634 |
+
new_version: numind/NuExtract-v1.5
|
| 635 |
+
---
|
| 636 |
+
|
| 637 |
+
This model is a quantized version of [`numind/NuExtract`](https://huggingface.co/numind/NuExtract) converted to the OpenVINO format. It was obtained via the [nncf-quantization](https://huggingface.co/spaces/echarlaix/nncf-quantization) space with [optimum-intel](https://github.com/huggingface/optimum-intel).
|
| 638 |
+
|
| 639 |
+
First make sure you have `optimum-intel` installed:
|
| 640 |
+
|
| 641 |
+
```bash
|
| 642 |
+
pip install optimum[openvino]
|
| 643 |
+
```
|
| 644 |
+
|
| 645 |
+
To load your model you can do as follows:
|
| 646 |
+
|
| 647 |
+
```python
|
| 648 |
+
from optimum.intel import OVModelForCausalLM
|
| 649 |
+
|
| 650 |
+
model_id = ""marquesafonso/NuExtract-openvino-8bit""
|
| 651 |
+
model = OVModelForCausalLM.from_pretrained(model_id)
|
| 652 |
+
```
|
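Once loaded, the OpenVINO model can be used like a regular `transformers` causal LM. A minimal, hedged usage sketch (loading the tokenizer from the same repo and using a placeholder prompt; NuExtract normally expects its own JSON-template extraction prompt):

```python
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer

model_id = 'marquesafonso/NuExtract-openvino-8bit'
model = OVModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Placeholder prompt to check that inference runs end to end.
inputs = tokenizer('What is the color of prunes?', return_tensors='pt')
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```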
| 653 |
+
","{""id"": ""marquesafonso/NuExtract-openvino-8bit"", ""author"": ""marquesafonso"", ""sha"": ""873ff7ae1b2564dd864eb0918217f506358ae064"", ""last_modified"": ""2024-12-02 22:22:02+00:00"", ""created_at"": ""2024-12-02 22:21:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""openvino"", ""phi3"", ""nncf"", ""8-bit"", ""custom_code"", ""en"", ""base_model:numind/NuExtract"", ""base_model:finetune:numind/NuExtract"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: numind/NuExtract\nlanguage:\n- en\nlicense: mit\ntags:\n- openvino\n- nncf\n- 8-bit\nnew_version: numind/NuExtract-v1.5"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Phi3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-mini-4k-instruct--configuration_phi3.Phi3Config"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-mini-4k-instruct--modeling_phi3.Phi3ForCausalLM""}, ""model_type"": ""phi3"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|end-output|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-02 22:22:02+00:00"", ""cardData"": ""base_model: numind/NuExtract\nlanguage:\n- en\nlicense: mit\ntags:\n- openvino\n- nncf\n- 8-bit\nnew_version: numind/NuExtract-v1.5"", ""transformersInfo"": null, ""_id"": ""674e32ee4b7915defe5f48f3"", ""modelId"": ""marquesafonso/NuExtract-openvino-8bit"", ""usedStorage"": 3824918816}",1,,0,,0,,0,,0,"echarlaix/nncf-quantization, 
huggingface/InferenceSupport/discussions/new?title=marquesafonso/NuExtract-openvino-8bit&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmarquesafonso%2FNuExtract-openvino-8bit%5D(%2Fmarquesafonso%2FNuExtract-openvino-8bit)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
|
NuminaMath-7B-TIR_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
OOTDiffusion_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,37 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
levihsu/OOTDiffusion,"---
|
| 3 |
+
license: cc-by-nc-sa-4.0
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
# OOTDiffusion
|
| 7 |
+
[Our OOTDiffusion GitHub repository](https://github.com/levihsu/OOTDiffusion)
|
| 8 |
+
|
| 9 |
+
🤗 [Try out OOTDiffusion](https://huggingface.co/spaces/levihsu/OOTDiffusion)
|
| 10 |
+
|
| 11 |
+
(Thanks to [ZeroGPU](https://huggingface.co/zero-gpu-explorers) for providing A100 GPUs)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
> **OOTDiffusion: Outfitting Fusion based Latent Diffusion for Controllable Virtual Try-on** [[arXiv paper](https://arxiv.org/abs/2403.01779)]<br>
|
| 15 |
+
> [Yuhao Xu](http://levihsu.github.io/), [Tao Gu](https://github.com/T-Gu), [Weifeng Chen](https://github.com/ShineChen1024), [Chengcai Chen](https://www.researchgate.net/profile/Chengcai-Chen)<br>
|
| 16 |
+
> Xiao-i Research
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
Our model checkpoints trained on [VITON-HD](https://github.com/shadow2496/VITON-HD) (half-body) and [Dress Code](https://github.com/aimagelab/dress-code) (full-body) have been released.
|
| 20 |
+
|
| 21 |
+
* 📢📢 We support ONNX for [humanparsing](https://github.com/GoGoDuck912/Self-Correction-Human-Parsing) now. Most environmental issues should have been addressed : )
|
| 22 |
+
* Please also download [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) into the ***checkpoints*** folder (one way to do this is sketched after this list)
|
| 23 |
+
* We've only tested our code and models on Linux (Ubuntu 22.04)
|
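A minimal, hedged sketch of that download step, assuming `huggingface_hub` is installed; the target subfolder name is an assumption, so adjust it to wherever your OOTDiffusion setup expects the CLIP weights:

```python
from huggingface_hub import snapshot_download

# Fetch openai/clip-vit-large-patch14 into the local checkpoints folder.
# 'checkpoints/clip-vit-large-patch14' is an assumed path; adjust to your setup.
snapshot_download(
    repo_id='openai/clip-vit-large-patch14',
    local_dir='checkpoints/clip-vit-large-patch14',
)
```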
| 24 |
+
|
| 25 |
+

|
| 26 |
+

|
| 27 |
+
|
| 28 |
+
## Citation
|
| 29 |
+
```
|
| 30 |
+
@article{xu2024ootdiffusion,
|
| 31 |
+
title={OOTDiffusion: Outfitting Fusion based Latent Diffusion for Controllable Virtual Try-on},
|
| 32 |
+
author={Xu, Yuhao and Gu, Tao and Chen, Weifeng and Chen, Chengcai},
|
| 33 |
+
journal={arXiv preprint arXiv:2403.01779},
|
| 34 |
+
year={2024}
|
| 35 |
+
}
|
| 36 |
+
```
|
| 37 |
+
","{""id"": ""levihsu/OOTDiffusion"", ""author"": ""levihsu"", ""sha"": ""c79f9dd0585743bea82a39261cc09a24040bc4f9"", ""last_modified"": ""2024-04-17 06:03:25+00:00"", ""created_at"": ""2024-02-21 02:50:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 302, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""onnx"", ""safetensors"", ""arxiv:2403.01779"", ""license:cc-by-nc-sa-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: cc-by-nc-sa-4.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/humanparsing/exp-schp-201908261155-lip.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/humanparsing/exp-schp-201908301523-atr.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/humanparsing/parsing_atr.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/humanparsing/parsing_lip.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_dc/checkpoint-36000/unet_garm/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_dc/checkpoint-36000/unet_garm/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_dc/checkpoint-36000/unet_vton/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_dc/checkpoint-36000/unet_vton/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_hd/checkpoint-36000/unet_garm/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_hd/checkpoint-36000/unet_garm/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_hd/checkpoint-36000/unet_vton/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/ootd_hd/checkpoint-36000/unet_vton/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='checkpoints/ootd/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/ootd/vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoints/openpose/ckpts/body_pose_model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/demo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='images/workflow.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [""levihsu/OOTDiffusion"", ""zhengchong/CatVTON"", ""aiqcamp/fash"", ""VIDraft/Fashion-Fit"", ""ahkamboh/Change-cloth-AI"", ""hikerxu/OOTDiffusion"", ""Katiyar48/OOTDiffusion-VirtualTryOnClothing"", ""Nymbo/CatVTON"", ""mukeshpandey2628/GT_VTR3_1"", ""shahza1b/CatVTON"", ""John6666/space-splitter"", ""lbjocson/OOTDiffusion-Test"", ""Nymbo/OOTDiffusion"", ""Saad0KH/OOTDiffusion"", ""cocktailpeanut/OOTDiffusion"", ""tsi-org/OOTDiffusion"", ""raznal/OOTDiffusion"", ""seawolf2357/aifah"", ""zhenghong/OOTDiffusion"", ""nat997/fashion-diffusion"", ""nat997/fashion-diff"", ""nat997/OOTDiffusion"", ""VictorFS82/TestCHIn"", ""liuq641968816/OOTSD"", ""thobuiq/OOTDiffusion"", ""ChrisJohnson111/test5"", ""OjciecTadeusz/OOTDiffusion"", ""emilwojtaszek/OOTDiffusion"", ""HarrysonIA/OOTDDiffusion"", ""Rebecasarai/instant-virtual-try-on"", ""lgiavedoni/OOTDiffusion"", ""Smiley0707/OOTDiffusion"", ""Gopalagarwal/Deradh"", ""michaelcostacardozo/OOTDiffusion-cpu"", ""rimjhimittal/final"", ""alexff91/Virtual-Try-On-Advanced"", ""hungdang1610/CatVTON"", ""moyabill/OOTDiffusion"", ""Shad0ws/CatVTON"", ""Ammaralee/Trail"", ""umerkk164/OOTDiffusion"", ""imados51/TW"", ""jsoncm/OOTDiffusion"", ""ahmadsuyadi/Virtual-Try-On-Advanced"", ""ahmadsuyadi/OOTDiffusion"", ""marktow/run"", ""ProgrammerParamesh/VirtualDress"", ""nrtoya/CatVTON2"", ""serhatyalcin/OOTDiffusion"", ""ShubhankarMUS/OOTDiffusion"", ""abubakar123456/tryon"", ""aiqcamp/fash-old"", ""sukalovpro/OOTDiffusion2"", ""royalx/VirtualTryClothing"", ""royalx/OOTDiffusion-VirtualTryOnClothing"", ""wylupek/Test"", ""thincamel/IOTA_OOTDiffusion"", ""iamahmadsaboor/OOTDiffusion"", ""jarvislk/OOTDiffusion"", ""Vaibhavnaik12/conDiffusion"", ""themanas021/OOTDiffusion"", ""nain6246/AR-STYLING"", ""RiponSamadder/OOTD"", ""HusnainAliOfficial738/Lumea"", ""Deepak7376/OOTDiffusion"", ""Vaibhavnaik12/DeXFit-TryOn"", ""SaadAhmedSiddiqui/27-Dec-CatVTON"", ""nvn04/StyleSnap"", ""nvn04/DemoStyleSnap"", ""tao0109/Change-cloth-AI"", ""raulsenaferreira/OOTDiffusion"", ""ekimbasoglu/OOTDiffusion-test-again"", ""Oshuboi/OOTDiffusion"", ""stevenijacobs/Brooke-Fashion"", ""rtallam45/MarketingCopilot"", ""botai3/CHELUMORE_AI"", ""botai3/CatVTON"", ""Lokesh84/OOTDiffusion202"", ""SaMeT1693/Deneme16"", ""BEfunnuga/Change-cloth-AI"", ""kulashekar123/VirtuTry"", ""zuri-technical/Change-cloth-AI"", ""zuri-technical/Change-clot"", ""DilshanIRU/FitON-API"", ""vdvdvdubey/OOTDiffusion"", ""John6666/OOTDiffusion"", ""Justo19/mvp-catvton"", ""banao-tech/outfit-change"", ""Decli-Tech/Fashion-Fit"", ""kssathishh/CatVTON1"", ""Jeff850/OOTDiffusion""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-17 06:03:25+00:00"", ""cardData"": ""license: cc-by-nc-sa-4.0"", ""transformersInfo"": null, ""_id"": ""65d564fc66b4b3725e158fc8"", ""modelId"": ""levihsu/OOTDiffusion"", ""usedStorage"": 19287961101}",0,,0,,0,,0,,0,"BEfunnuga/Change-cloth-AI, John6666/space-splitter, Katiyar48/OOTDiffusion-VirtualTryOnClothing, VIDraft/Fashion-Fit, 
ahkamboh/Change-cloth-AI, huggingface/InferenceSupport/discussions/new?title=levihsu/OOTDiffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blevihsu%2FOOTDiffusion%5D(%2Flevihsu%2FOOTDiffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, levihsu/OOTDiffusion, moyabill/OOTDiffusion, shahza1b/CatVTON, thincamel/IOTA_OOTDiffusion, zhengchong/CatVTON, zuri-technical/Change-clot, zuri-technical/Change-cloth-AI",13
|
PhotoMaker_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,2 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
TencentARC/PhotoMaker,N/A,"{""id"": ""TencentARC/PhotoMaker"", ""author"": ""TencentARC"", ""sha"": ""f68f8e6309bf213d28d68230abff0ccc92de9f30"", ""last_modified"": ""2024-07-22 15:28:18+00:00"", ""created_at"": ""2024-01-13 14:11:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 27358, ""downloads_all_time"": null, ""likes"": 426, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""text-to-image"", ""en"", ""arxiv:2312.04461"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='photomaker-v1.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""TencentARC/PhotoMaker"", ""TencentARC/PhotoMaker-V2"", ""TencentARC/PhotoMaker-Style"", ""YupengZhou/StoryDiffusion"", ""TencentARC/BrushEdit"", ""Nymbo/image_gen_supaqueue"", ""abreza/3d_animation_toolkit"", ""linoyts/olympics-photobooth"", ""awacke1/3d_animation_toolkit"", ""TonyGold777/PhotoMaker"", ""tsqn/PhotoMaker-V2"", ""anishde/SIMPLIFY_text_summarizer"", ""SD-online/Fooocus-Docker"", ""svjack/PhotoMaker-V2"", ""LikhonScripts/TencentARC-PhotoMaker"", ""Cothn/PhotoMaker"", ""rossli6789/TencentARC-PhotoMaker"", ""Andre22x5/PhotoMakerNEW"", ""ZENLLC/StoryDiffusion"", ""thenekomacias/PhotoMaker"", ""shmuel85/DeFooocus"", ""waloneai/WalOPhotoSt"", ""gabrielnadoncanada/TencentARC-PhotoMaker"", ""asd8yowt54y8p54vumop/PhotoMaker"", ""Shawt/PhotoMaker-Style"", ""ahugh777/TencentARC-PhotoMaker"", ""Frz21/PhotoMaker-Style"", ""jessekrills08/PhotoMaker-Style"", ""Shawt/PhotoMaker"", ""svli/PhotoMaker"", ""Raxmatilla/testAi"", ""sdev11/PhotoMaker"", ""youngitachi/TencentARC-PhotoMaker"", ""Ganjick/TencentARC-PhotoMaker"", ""jeff27361782/TencentARC-PhotoMaker"", ""TkaySA/PhotoMaker"", ""Goko18/PhotoMaker"", ""Raxmatilla/TencentARC-PhotoMaker"", ""emailconfirmation/PhotoMaker"", ""tsi-org/PhotoMaker"", ""monishv03/TencentARC-PhotoMaker"", ""wrknd/TencentARC-PhotoMaker"", ""uelordi/PhotoMaker"", ""uelordi/PhotoMaker-Style"", ""Gaje/TencentARC-PhotoMaker"", ""charlestonX/TencentARC-PhotoMaker"", ""devridn/TencentARC-PhotoMaker"", ""VelesKant/VelesMarker"", ""DmitrMakeev/PhotoMaker-Style"", ""jensinjames/TencentARC-PhotoMaker"", ""freddyaboulton/PhotoMaker"", ""axnjr/TencentARC-PhotoMaker"", ""uelordi/PhotoMakerFork"", ""MrNikolaTesla/APIFo"", ""jensinjames/PhotoMaker-dev"", ""ayagup/TencentARC-PhotoMaker"", ""thatsparks/PhotoMaker"", ""alfalmi/PhotoMaker"", ""surajyadav53/Fooocus-v2"", ""Lummus00/PhotoMaker"", ""MikeyAK/TencentARC-PhotoMaker"", ""Thomas22943/my-first-training-space"", ""tristan-aeschbach/Test"", ""flatsko/TencentARC-PhotoMaker1"", ""Arivmta19/img2img"", ""JagadishR/faceAI"", ""SanzExtz/PhotoMaker-Style"", ""ametko/TencentARC-PhotoMaker"", ""tikcrimhg/testai"", ""viacheslavzyrianov/myfirstspace"", ""HumbleWolf/DockerFooocus"", ""Yzh1998/TencentARC-PhotoMaker"", ""GardenXas/TencentARC-PhotoMaker"", ""Fleecks/TencentARC-PhotoMaker"", ""Lolmankwkw/TencentARC-PhotoMaker"", 
""Lolmankwkw/TencentARC-PhotoM"", ""shetka/PhotoMaker-Style"", ""khalifa0079/TencentARC-PhotoMaker"", ""Adityadn/AI-Describe-Image"", ""Adityadn/KB"", ""buildapp/tencenPhotoMaker"", ""TravelScrub/PhotoMaker"", ""chabab/TencentARC-PhotoMaker"", ""Adityadn/AIImages"", ""masha100000/TencentARC-PhotoMaker"", ""Deeyn/Fooocus"", ""osmunphotography/TencentARC-PhotoMaker"", ""karpatko/Fooocus"", ""STEVELU1116/AKTEST1"", ""Rishixx02/TencentARC-PhotoMaker1"", ""Simon34890/TencentARC-PhotoMaker"", ""hector404nf/TencentARC-PhotoMaker"", ""Mathdesenvnonimate/stable"", ""ChrisJohnson111/test1"", ""pedidosamtlahuelilpan/PhotoMaker"", ""jacktheporsche/StoryDiffusion"", ""jtanner/StoryDiffusion"", ""cocktailpeanut/StoryDiffusion"", ""Phfoysitzzfjfrusyyosttskyddhuugtxoysyogksk/TencentARC-PhotoMaker"", ""Leo11235/PhotoMaker""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-22 15:28:18+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\npipeline_tag: text-to-image"", ""transformersInfo"": null, ""_id"": ""65a29a2a01ed2b702d911591"", ""modelId"": ""TencentARC/PhotoMaker"", ""usedStorage"": 2762506302}",0,,0,"https://huggingface.co/jjaq/test-civitai-xlMoreArt, https://huggingface.co/jjaq/civitai-sdxlUnstableDiffusersRundiffusion, https://huggingface.co/jjaq/civitai-sdxUnstableDiffusers",3,,0,,0,"Cothn/PhotoMaker, Goko18/PhotoMaker, MikeyAK/TencentARC-PhotoMaker, Nymbo/image_gen_supaqueue, TencentARC/PhotoMaker, TencentARC/PhotoMaker-Style, TencentARC/PhotoMaker-V2, YupengZhou/StoryDiffusion, ahugh777/TencentARC-PhotoMaker, huggingface/InferenceSupport/discussions/new?title=TencentARC/PhotoMaker&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTencentARC%2FPhotoMaker%5D(%2FTencentARC%2FPhotoMaker)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jensinjames/TencentARC-PhotoMaker, monishv03/TencentARC-PhotoMaker, sdev11/PhotoMaker, svjack/PhotoMaker-V2",14
|
Qwen-72B_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,808 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Qwen/Qwen-72B,"---
|
| 3 |
+
language:
|
| 4 |
+
- zh
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- qwen
|
| 8 |
+
new_version: Qwen/Qwen1.5-72B
|
| 9 |
+
pipeline_tag: text-generation
|
| 10 |
+
inference: false
|
| 11 |
+
license: other
|
| 12 |
+
license_name: tongyi-qianwen-license-agreement
|
| 13 |
+
license_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+
# Qwen-72B
|
| 17 |
+
|
| 18 |
+
<p align=""center"">
|
| 19 |
+
<img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
|
| 20 |
+
<p>
|
| 21 |
+
<br>
|
| 22 |
+
|
| 23 |
+
<p align=""center"">
|
| 24 |
+
🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>   |   🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>   |    📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a>    |   🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-72B-Chat-Demo/summary"">Demo</a>
|
| 25 |
+
<br>
|
| 26 |
+
<a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>   |   <a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>   |   <a href=""https://dashscope.aliyun.com"">API</a>
|
| 27 |
+
</p>
|
| 28 |
+
<br>
|
| 29 |
+
|
| 30 |
+
## 介绍 (Introduction)
|
| 31 |
+
|
| 32 |
+
**通义千问-72B**(**Qwen-72B**)是阿里云研发的通义千问大模型系列的720亿参数规模的模型。Qwen-72B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-72B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-72B-Chat。本仓库为Qwen-72B的仓库。
|
| 33 |
+
|
| 34 |
+
通义千问-72B(Qwen-72B)主要有以下特点:
|
| 35 |
+
|
| 36 |
+
1. **大规模高质量训练语料**:使用超过3万亿tokens的数据进行预训练,包含高质量中、英、多语言、代码、数学等数据,涵盖通用及专业领域的训练语料。通过大量对比实验对预训练语料分布进行了优化。
|
| 37 |
+
2. **强大的性能**:Qwen-72B在多个中英文下游评测任务上(涵盖常识推理、代码、数学、翻译等),效果显著超越现有的开源模型。具体评测结果请详见下文。
|
| 38 |
+
3. **覆盖更全面的词表**:相比目前以中英词表为主的开源模型,Qwen-72B使用了约15万大小的词表。该词表对多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强和扩展。
|
| 39 |
+
4. **较长的上下文支持**:Qwen-72B支持32k的上下文长度。
|
| 40 |
+
|
| 41 |
+
如果您想了解更多关于通义千问72B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
|
| 42 |
+
|
| 43 |
+
**Qwen-72B** is the 72B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-72B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-72B, we release Qwen-72B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-72B.
|
| 44 |
+
|
| 45 |
+
The features of Qwen-72B include:
|
| 46 |
+
|
| 47 |
+
1. **Large-scale high-quality training corpora**: It is pretrained on over 3 trillion tokens, including Chinese, English, multilingual texts, code, and mathematics, covering general and professional fields. The distribution of the pre-training corpus has been optimized through a large number of ablation experiments.
|
| 48 |
+
2. **Competitive performance**: It significantly surpasses existing open-source models on multiple Chinese and English downstream evaluation tasks (including commonsense, reasoning, code, mathematics, etc.). See below for specific evaluation results.
|
| 49 |
+
3. **More comprehensive vocabulary coverage**: Compared with other open-source models based on Chinese and English vocabularies, Qwen-72B uses a vocabulary of over 150K tokens. This vocabulary is more friendly to multiple languages, enabling users to directly further enhance the capability for certain languages without expanding the vocabulary.
|
| 50 |
+
4. **Longer context support**: Qwen-72B supports 32k context length.
|
| 51 |
+
|
| 52 |
+
For more details about the open-source model of Qwen-72B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
|
| 53 |
+
<br>
|
| 54 |
+
|
| 55 |
+
## 要求(Requirements)
|
| 56 |
+
|
| 57 |
+
* python 3.8及以上版本
|
| 58 |
+
* pytorch 1.12及以上版本,推荐2.0及以上版本
|
| 59 |
+
* 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
|
| 60 |
+
* **运行BF16或FP16模型需要多卡至少144GB显存(例如2xA100-80G或5xV100-32G);运行Int4模型至少需要48GB显存(例如1xA100-80G或2xV100-32G)。**
|
| 61 |
+
* python 3.8 and above
|
| 62 |
+
* pytorch 1.12 and above, 2.0 and above are recommended
|
| 63 |
+
* CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
|
| 64 |
+
* **To run Qwen-72B-Chat in bf16/fp16, at least 144GB GPU memory is required (e.g., 2xA100-80G or 5xV100-32G). To run it in int4, at least 48GB GPU memory is required (e.g., 1xA100-80G or 2xV100-32G).**
|
| 65 |
+
<br>
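
As a rough cross-check (not part of the original card), the 144GB figure follows directly from the parameter count: roughly 72.3B parameters at 2 bytes each in BF16/FP16, before activations and KV cache. A minimal sketch:

```python
# Rough estimate of weight memory for Qwen-72B in BF16/FP16 (2 bytes per parameter).
# The parameter count is taken from the repository's safetensors metadata.
params = 72_287_920_128
bytes_per_param = 2  # BF16 or FP16
print(f'~{params * bytes_per_param / 1e9:.0f} GB of weights')  # ~145 GB, plus activations and KV cache
```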
|
| 66 |
+
|
| 67 |
+
## 依赖项 (Dependency)
|
| 68 |
+
|
| 69 |
+
运行Qwen-72B,请确保满足上述要求,再执行以下pip命令安装依赖库
|
| 70 |
+
|
| 71 |
+
To run Qwen-72B, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
|
| 72 |
+
|
| 73 |
+
```bash
|
| 74 |
+
pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
|
| 78 |
+
|
| 79 |
+
In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
git clone https://github.com/Dao-AILab/flash-attention
|
| 83 |
+
cd flash-attention && pip install .
|
| 84 |
+
# 下方安装可选,安装可能比较缓慢。
|
| 85 |
+
# Below are optional. Installing them might be slow.
|
| 86 |
+
# pip install csrc/layer_norm
|
| 87 |
+
# 如果你的flash-attn版本高于2.1.1,下方不需要安装。
|
| 88 |
+
# If the version of flash-attn is higher than 2.1.1, the following is not needed.
|
| 89 |
+
# pip install csrc/rotary
|
| 90 |
+
```
|
| 91 |
+
<br>
|
| 92 |
+
|
| 93 |
+
## 快速使用(Quickstart)
|
| 94 |
+
|
| 95 |
+
您可以通过以下代码轻松调用:
|
| 96 |
+
|
| 97 |
+
You can easily call the model with the following code:
|
| 98 |
+
|
| 99 |
+
```python
|
| 100 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 101 |
+
from transformers.generation import GenerationConfig
|
| 102 |
+
|
| 103 |
+
# Note: The default behavior now has injection attack prevention off.
|
| 104 |
+
tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-72B"", trust_remote_code=True)
|
| 105 |
+
|
| 106 |
+
# use bf16
|
| 107 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
|
| 108 |
+
# use fp16
|
| 109 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
|
| 110 |
+
# use cpu only
|
| 111 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B"", device_map=""cpu"", trust_remote_code=True).eval()
|
| 112 |
+
# use auto mode, automatically select precision based on the device.
|
| 113 |
+
model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B"", device_map=""auto"", trust_remote_code=True).eval()
|
| 114 |
+
|
| 115 |
+
# Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
|
| 116 |
+
# model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-72B"", trust_remote_code=True)
|
| 117 |
+
|
| 118 |
+
inputs = tokenizer('蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是', return_tensors='pt')
|
| 119 |
+
inputs = inputs.to(model.device)
|
| 120 |
+
pred = model.generate(**inputs)
|
| 121 |
+
print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
|
| 122 |
+
# 蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是亚的斯亚贝巴(Addis Ababa)...
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
|
| 126 |
+
|
| 127 |
+
Please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information.
|
| 128 |
+
<br>
|
| 129 |
+
|
| 130 |
+
## Tokenizer
|
| 131 |
+
|
| 132 |
+
> 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。
|
| 133 |
+
|
| 134 |
+
基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。
|
| 135 |
+
|
| 136 |
+
Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md).
|
| 137 |
+
<br>
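
A minimal usage sketch (not part of the original card) showing how the tiktoken-based tokenizer can be inspected before fine-tuning; the digit-splitting behavior is described in the model details below, and the exact token representation may differ:

```python
from transformers import AutoTokenizer

# Load the custom tiktoken-based tokenizer (trust_remote_code is required).
tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-72B', trust_remote_code=True)

# The vocabulary is ~151k tokens, larger than typical Chinese/English-only vocabularies.
print(tokenizer.vocab_size)

# Numbers are segmented into single digits by the vocabulary.
print(tokenizer.tokenize('12345'))
```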
|
| 138 |
+
|
| 139 |
+
## 模型细节 (Model)
|
| 140 |
+
|
| 141 |
+
Qwen-72B模型规模基本情况如下所示:
|
| 142 |
+
|
| 143 |
+
The details of the model architecture of Qwen-72B are listed as follows:
|
| 144 |
+
|
| 145 |
+
| Hyperparameter | Value |
|
| 146 |
+
|:----------------|:-------|
|
| 147 |
+
| n_layers | 80 |
|
| 148 |
+
| n_heads | 64 |
|
| 149 |
+
| d_model | 8192 |
|
| 150 |
+
| vocab size | 151851 |
|
| 151 |
+
| sequence length | 32768 |
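
The values above can be cross-checked against the repository's config; a minimal sketch, assuming the custom QWenConfig exposes the usual attribute names:

```python
from transformers import AutoConfig

# Load the custom QWenConfig shipped with the repository (trust_remote_code is required).
config = AutoConfig.from_pretrained('Qwen/Qwen-72B', trust_remote_code=True)

# Attribute names are assumptions based on common conventions; adjust if they differ.
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size, config.vocab_size)
# Expected per the table: 80, 64, 8192, 151851
```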
|
| 152 |
+
|
| 153 |
+
在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
|
| 154 |
+
即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
|
| 155 |
+
|
| 156 |
+
在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-72B使用了超过15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。
|
| 157 |
+
词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
|
| 158 |
+
|
| 159 |
+
我们从部分语种各随机抽取100万个文档语料,以对比不同模型的编码压缩率(以支持100语种的XLM-R为基准值1,越低越好),具体性能见图。
|
| 160 |
+
|
| 161 |
+
可以看到Qwen-72B在保持中英代码高效解码的前提下,对部分使用人群较多的语种(泰语th、希伯来语he、阿拉伯语ar、韩语ko、越南语vi、日语ja、土耳其语tr、印尼语id、波兰语pl、俄语ru、荷兰语nl、葡萄牙语pt、意大利语it、德语de、西班牙语es、法语fr等)上也实现了较高的压缩率,使得模型在这些语种上也具备较强的可扩展性和较高的训练和推理效率。
|
| 162 |
+
|
| 163 |
+
在预训练数据方面,Qwen-72B模型一方面利用了部分开源通用语料,
|
| 164 |
+
另一方面也积累了海量全网语料以及高质量文本内容,去重及过滤后的语料超过3T tokens。
|
| 165 |
+
囊括全网文本、百科、书籍、代码、数学及各个领域垂类。
|
| 166 |
+
|
| 167 |
+
<p align=""center"">
|
| 168 |
+
<img src=""assets/tokenizer.png"" style=""width: 1200px""/>
|
| 169 |
+
<p>
|
| 170 |
+
|
| 171 |
+
For position encoding, FFN activation function, and normalization methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
|
| 172 |
+
|
| 173 |
+
For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-72B uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary. It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
|
| 174 |
+
|
| 175 |
+
We randomly selected a corpus of 1 million documents for each language to test and compare the encoding compression rates of different models (with XLM-R, which supports 100 languages, as the base value 1). The specific performance is shown in the figure above.
|
| 176 |
+
|
| 177 |
+
As can be seen, while ensuring the efficient decoding of Chinese, English, and code, Qwen-72B also achieves a high compression rate for many other languages (such as th, he, ar, ko, vi, ja, tr, id, pl, ru, nl, pt, it, de, es, fr etc.), equipping the model with strong scalability as well as high training and inference efficiency in these languages.
|
| 178 |
+
|
| 179 |
+
For pre-training data, on the one hand, Qwen-72B uses part of the open-source generic corpus. On the other hand, it uses a massive amount of accumulated web corpus and high-quality text content. The scale of the corpus reaches over 3T tokens after deduplication and filtering, encompassing web text, encyclopedias, books, code, mathematics, and various domains.
|
| 180 |
+
<br>
|
| 181 |
+
|
| 182 |
+
## 评测效果(Evaluation)
|
| 183 |
+
我们选取了MMLU,C-Eval,GSM8K, MATH, HumanEval, MBPP, BBH, CMMLU等目前较流行的benchmark,对模型的中英知识能力、翻译、数学推理、代码等能力进行综合评测。Qwen-72B模型在所有benchmark上均取得了开源模型中的最优表现。
|
| 184 |
+
|
| 185 |
+
We selected MMLU, C-Eval, GSM8K, MATH, HumanEval, MBPP, BBH, CMMLU, which are currently popular benchmarks, to test the model’s Chinese and English knowledge capabilities, translation, mathematical reasoning, coding and other capabilities. From the following comprehensive evaluation results, we can see that the Qwen models outperform the similarly sized open-source models on all tasks.
|
| 186 |
+
|
| 187 |
+
| Model | Avg | MMLU | C-Eval | GSM8K | MATH | HumanEval | MBPP | BBH | AGIEval | GaokaoBench | CMMLU |
|
| 188 |
+
|:-------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:---------:|:--------:|:--------:|:--------:|:--------:|:--------:|
|
| 189 |
+
| | | 5-shot | 5-shot | 8-shot | 4-shot | 0-shot | 3-shot | 3-shot | 0-shot | 0-shot | 5-shot |
|
| 190 |
+
| LLaMA2-7B | 24.4 | 46.8 | 32.5 | 16.7 | 3.3 | 12.8 | 20.8 | 38.2 | 21.8 | 18.9 | 31.8 |
|
| 191 |
+
| LLaMA2-13B | 31.3 | 55.0 | 41.4 | 29.6 | 5.0 | 18.9 | 30.3 | 45.6 | 30.9 | 18.2 | 38.4 |
|
| 192 |
+
| LLaMA2-70B | 45.7 | 69.7 | 50.1 | 63.5 | 12.0 | 26.2 | 39.6 | 64.9 | 54.2 | 23.3 | 53.6 |
|
| 193 |
+
| InternLM-20B | 47.2 | 62.1 | 58.8 | 52.6 | 7.9 | 25.6 | 35.6 | 52.5 | 59.0 | 59.0 | 59.0 |
|
| 194 |
+
| Yi-34B | 58.0 | 76.3 | 81.8 | 67.9 | 15.9 | 26.2 | 38.2 | 66.4 | 56.5 | 68.3 | 82.6 |
|
| 195 |
+
| XVERSE-65B | - | 70.8 | 68.6 | 60.3 | - | 26.3 | - | - | - | - | - |
|
| 196 |
+
| **Qwen-7B** | 46.2 | 58.2 | 63.5 | 51.7 | 11.6 | 29.9 | 31.6 | 45.0 | 45.3 | 62.5 | 62.2 |
|
| 197 |
+
| **Qwen-14B** | 52.7 | 66.3 | 72.1 | 61.3 | 24.8 | 32.3 | 40.8 | 53.4 | 51.9 | 52.7 | 71.0 |
|
| 198 |
+
| **Qwen-72B** | **66.4** | **77.4** | **83.3** | **78.9** | **35.2** | **35.4** | **52.2** | **67.7** | **62.5** | **87.6** | **83.6** |
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
### 长序列评测(Long-Context Evaluation)
|
| 202 |
+
|
| 203 |
+
Qwen-72B采用扩展RoPE base的训练方法,支持32k的外推长度,我们使用arXiv数据进行语言建模评测,PPL(越低越好)结果如下:
|
| 204 |
+
|
| 205 |
+
Qwen-72B is trained with an extended RoPE base and supports extrapolation to a 32k context length. We use arXiv data for language modeling evaluation. The PPL (lower is better) results are as follows:
|
| 206 |
+
<table>
|
| 207 |
+
<tr>
|
| 208 |
+
<th rowspan=""2"">Model</th><th colspan=""6"" align=""center"">Sequence Length</th>
|
| 209 |
+
</tr>
|
| 210 |
+
<tr>
|
| 211 |
+
</th><th align=""center"">8192</th><th align=""center"">16384</th><th align=""center"">32768</th>
|
| 212 |
+
</tr>
|
| 213 |
+
<tr>
|
| 214 |
+
<td>Qwen-72B</td><td align=""center"">2.828</td><td align=""center"">2.734</td><td align=""center"">2.717</td>
|
| 215 |
+
</tr>
|
| 216 |
+
|
| 217 |
+
</table>
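
A minimal sketch (an assumption, not the official evaluation code, which lives in the GitHub repo) of how perplexity over a long document could be measured with the Hugging Face API:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-72B', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained('Qwen/Qwen-72B', device_map='auto', trust_remote_code=True).eval()

def perplexity(text: str, max_len: int = 32768) -> float:
    # Truncate to the supported context length and compute mean cross-entropy over the sequence.
    ids = tokenizer(text, return_tensors='pt').input_ids[:, :max_len].to(model.device)
    with torch.no_grad():
        loss = model(ids, labels=ids).loss
    return torch.exp(loss).item()

# print(perplexity(open('arxiv_paper.txt').read()))  # hypothetical input file
```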
|
| 218 |
+
|
| 219 |
+
## 评测复现(Reproduction)
|
| 220 |
+
|
| 221 |
+
我们提供了评测脚本,方便大家复现模型效果,详见[链接](https://github.com/QwenLM/Qwen/tree/main/eval)。提示:由于硬件和框架造成的舍入误差,复现结果如有小幅波动属于正常现象。
|
| 222 |
+
|
| 223 |
+
We have provided evaluation scripts so you can reproduce the model's performance; see this [link](https://github.com/QwenLM/Qwen/tree/main/eval) for details. Note: small fluctuations in reproduced results caused by hardware and framework rounding errors are normal.
|
| 224 |
+
<br>
|
| 225 |
+
|
| 226 |
+
## FAQ
|
| 227 |
+
|
| 228 |
+
如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
|
| 229 |
+
|
| 230 |
+
If you encounter problems, please check the [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and existing issues first to search for a solution before opening a new issue.
|
| 231 |
+
<br>
|
| 232 |
+
|
| 233 |
+
## 引用 (Citation)
|
| 234 |
+
|
| 235 |
+
如果你觉得我们的工作对你有帮助,欢迎引用!
|
| 236 |
+
|
| 237 |
+
If you find our work helpful, feel free to cite it.
|
| 238 |
+
|
| 239 |
+
```
|
| 240 |
+
@article{qwen,
|
| 241 |
+
title={Qwen Technical Report},
|
| 242 |
+
author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
|
| 243 |
+
journal={arXiv preprint arXiv:2309.16609},
|
| 244 |
+
year={2023}
|
| 245 |
+
}
|
| 246 |
+
```
|
| 247 |
+
<br>
|
| 248 |
+
|
| 249 |
+
## 使用协议(License Agreement)
|
| 250 |
+
|
| 251 |
+
我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-72B-Chat)申请。
|
| 252 |
+
|
| 253 |
+
Our code and checkpoints are open for research purposes and allowed for commercial use. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-72B-Chat) to apply.
|
| 254 |
+
<br>
|
| 255 |
+
|
| 256 |
+
## 联系我们(Contact Us)
|
| 257 |
+
|
| 258 |
+
如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
|
| 259 |
+
|
| 260 |
+
If you would like to leave a message for either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
|
| 261 |
+
|
| 262 |
+
","{""id"": ""Qwen/Qwen-72B"", ""author"": ""Qwen"", ""sha"": ""b8e18ac61df64d35308695769ff46b976b6a00f4"", ""last_modified"": ""2024-10-09 05:59:26+00:00"", ""created_at"": ""2023-11-26 16:16:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1649, ""downloads_all_time"": null, ""likes"": 356, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- qwen\nnew_version: Qwen/Qwen1.5-72B\ninference: false"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/qwen_tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/tokenizer.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00082.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00043-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00056-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00057-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00058-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00059-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00060-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00061-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00062-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00063-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00064-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00065-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00066-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00067-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00068-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00069-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00070-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00071-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00072-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00073-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00074-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00075-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00076-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00077-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00078-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00079-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00080-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00081-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00082-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""officialhimanshu595/llama-factory"", ""msun415/Llamole""], ""safetensors"": {""parameters"": {""BF16"": 72287920128}, ""total"": 72287920128}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-09 05:59:26+00:00"", ""cardData"": ""language:\n- zh\n- en\nlicense: other\nlicense_name: tongyi-qianwen-license-agreement\nlicense_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT\npipeline_tag: text-generation\ntags:\n- qwen\nnew_version: Qwen/Qwen1.5-72B\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""65636f5fd5e3c35c793c135e"", ""modelId"": ""Qwen/Qwen-72B"", ""usedStorage"": 289151822456}",0,https://huggingface.co/Qwen/Qwen-72B-Chat,1,,0,"https://huggingface.co/mradermacher/Qwen-72B-GGUF, https://huggingface.co/mradermacher/Qwen-72B-i1-GGUF",2,,0,"Justinrune/LLaMA-Factory, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen-72B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen-72B%5D(%2FQwen%2FQwen-72B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, msun415/Llamole, officialhimanshu595/llama-factory",6
|
| 263 |
+
Qwen/Qwen-72B-Chat,"---
|
| 264 |
+
language:
|
| 265 |
+
- zh
|
| 266 |
+
- en
|
| 267 |
+
tags:
|
| 268 |
+
- qwen
|
| 269 |
+
pipeline_tag: text-generation
|
| 270 |
+
inference: false
|
| 271 |
+
license: other
|
| 272 |
+
license_name: tongyi-qianwen
|
| 273 |
+
license_link: https://huggingface.co/Qwen/Qwen-72B-Chat/blob/main/LICENSE
|
| 274 |
+
base_model:
|
| 275 |
+
- Qwen/Qwen-72B
|
| 276 |
+
new_version: Qwen/Qwen1.5-72B-Chat
|
| 277 |
+
library_name: transformers
|
| 278 |
+
---
|
| 279 |
+
|
| 280 |
+
# Qwen-72B-Chat
|
| 281 |
+
|
| 282 |
+
<p align=""center"">
|
| 283 |
+
<img src=""https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg"" width=""400""/>
|
| 284 |
+
<p>
|
| 285 |
+
<br>
|
| 286 |
+
|
| 287 |
+
<p align=""center"">
|
| 288 |
+
🤗 <a href=""https://huggingface.co/Qwen"">Hugging Face</a>   |   🤖 <a href=""https://modelscope.cn/organization/qwen"">ModelScope</a>   |    📑 <a href=""https://arxiv.org/abs/2309.16609"">Paper</a>    |   🖥️ <a href=""https://modelscope.cn/studios/qwen/Qwen-72B-Chat-Demo/summary"">Demo</a>
|
| 289 |
+
<br>
|
| 290 |
+
<a href=""https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png"">WeChat (微信)</a>   |   <a href=""https://discord.gg/z3GAxXZ9Ce"">Discord</a>   |   <a href=""https://dashscope.aliyun.com"">API</a>
|
| 291 |
+
</p>
|
| 292 |
+
<br>
|
| 293 |
+
|
| 294 |
+
## 介绍(Introduction)
|
| 295 |
+
|
| 296 |
+
**通义千问-72B**(**Qwen-72B**)是阿里云研发的通义千问大模型系列的720亿参数规模的模型。Qwen-72B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-72B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-72B-Chat。本仓库为Qwen-72B-Chat的仓库。
|
| 297 |
+
|
| 298 |
+
通义千问-72B(Qwen-72B)主要有以下特点:
|
| 299 |
+
|
| 300 |
+
1. **大规模高质量训练语料**:使用超过3万亿tokens的数据进行预训练,包含高质量中、英、多语言、代码、数学等数据,涵盖通用及专业领域的训练语料。通过大量对比实验对预训练语料分布进行了优化。
|
| 301 |
+
2. **强大的性能**:Qwen-72B在多个中英文下游评测任务上(涵盖常识推理、代码、数学、翻译等),效果显著超越现有的开源模型。具体评测结果请详见下文。
|
| 302 |
+
3. **覆盖更全面的词表**:相比目前以中英词表为主的开源模型,Qwen-72B使用了约15万大小的词表。该词表对多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强和扩展。
|
| 303 |
+
4. **更长的上下文支持**:Qwen-72B支持32k的上下文长度。
|
| 304 |
+
5. **系统指令跟随**:Qwen-72B-Chat可以通过调整系统指令,实现**角色扮演**,**语言风格迁移**,**任务设定**,和**行为设定**等能力。
|
| 305 |
+
|
| 306 |
+
如果您想了解更多关于通义千问72B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。
|
| 307 |
+
|
| 308 |
+
**Qwen-72B** is the 72B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-72B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-72B, we release Qwen-72B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-72B-Chat.
|
| 309 |
+
|
| 310 |
+
The features of Qwen-72B include:
|
| 311 |
+
|
| 312 |
+
1. **Large-scale high-quality training corpora**: It is pretrained on over 3 trillion tokens, including Chinese, English, multilingual texts, code, and mathematics, covering general and professional fields. The distribution of the pre-training corpus has been optimized through a large number of ablation experiments.
|
| 313 |
+
2. **Competitive performance**: It significantly surpasses existing open-source models on multiple Chinese and English downstream evaluation tasks (including commonsense, reasoning, code, mathematics, etc.). See below for specific evaluation results.
|
| 314 |
+
3. **More comprehensive vocabulary coverage**: Compared with other open-source models based on Chinese and English vocabularies, Qwen-72B uses a vocabulary of over 150K tokens. This vocabulary is more friendly to multiple languages, enabling users to directly further enhance the capability for certain languages without expanding the vocabulary.
|
| 315 |
+
4. **Longer context support**: Qwen-72B supports 32k context length.
|
| 316 |
+
5. **System prompt**: Qwen-72B-Chat can realize role playing, language style transfer, task setting, and behavior setting via the system prompt.
|
| 317 |
+
|
| 318 |
+
For more details about the open-source model of Qwen-72B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
|
| 319 |
+
<br>
|
| 320 |
+
|
| 321 |
+
## 要求(Requirements)
|
| 322 |
+
|
| 323 |
+
* python 3.8及以上版本
|
| 324 |
+
* pytorch 1.12及以上版本,推荐2.0及以上版本
|
| 325 |
+
* 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
|
| 326 |
+
* **运行BF16或FP16模型需要多卡至少144GB显存(例如2xA100-80G或5xV100-32G);运行Int4模型至少需要48GB显存(例如1xA100-80G或2xV100-32G)**
|
| 327 |
+
* python 3.8 and above
|
| 328 |
+
* pytorch 1.12 and above, 2.0 and above are recommended
|
| 329 |
+
* CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
|
| 330 |
+
* **To run Qwen-72B-Chat in bf16/fp16, at least 144GB GPU memory is required (e.g., 2xA100-80G or 5xV100-32G). To run it in int4, at least 48GB GPU memory is required (e.g., 1xA100-80G or 2xV100-32G)**
|
| 331 |
+
<br>
|
| 332 |
+
|
| 333 |
+
## 依赖项(Dependency)
|
| 334 |
+
|
| 335 |
+
### 使用HuggingFace进行推理
|
| 336 |
+
|
| 337 |
+
运行Qwen-72B-Chat,请确保满足上述要求,再执行以下pip命令安装依赖库
|
| 338 |
+
|
| 339 |
+
To run Qwen-72B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.
|
| 340 |
+
|
| 341 |
+
```bash
|
| 342 |
+
pip install ""transformers>=4.32.0"" accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
|
| 343 |
+
```
|
| 344 |
+
|
| 345 |
+
另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。
|
| 346 |
+
|
| 347 |
+
In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage.
|
| 348 |
+
|
| 349 |
+
```bash
|
| 350 |
+
git clone https://github.com/Dao-AILab/flash-attention
|
| 351 |
+
cd flash-attention && pip install .
|
| 352 |
+
# 下方安装可选,安装可能比较缓慢。
|
| 353 |
+
# Below are optional. Installing them might be slow.
|
| 354 |
+
# pip install csrc/layer_norm
|
| 355 |
+
# 如果你的flash-attn版本高于2.1.1,下方不需要安装。
|
| 356 |
+
# If the version of flash-attn is higher than 2.1.1, the following is not needed.
|
| 357 |
+
# pip install csrc/rotary
|
| 358 |
+
```
|
| 359 |
+
|
| 360 |
+
### 使用vLLM进行推理
|
| 361 |
+
|
| 362 |
+
使用vLLM进行推理可以支持更长的上下文长度并获得至少两倍的生成加速。你需要满足以下要求:
|
| 363 |
+
|
| 364 |
+
Using vLLM for inference supports longer context lengths and provides at least a 2x generation speedup. You need to meet the following requirements:
|
| 365 |
+
|
| 366 |
+
* pytorch >= 2.0
|
| 367 |
+
* cuda 11.8 or 12.1
|
| 368 |
+
|
| 369 |
+
如果你使用cuda12.1和pytorch2.1,可以直接使用以下命令安装vLLM。
|
| 370 |
+
|
| 371 |
+
If you use cuda 12.1 and pytorch 2.1, you can directly use the following command to install vLLM.
|
| 372 |
+
|
| 373 |
+
```bash
|
| 374 |
+
# pip install vllm # This line is faster but it does not support quantization models.
|
| 375 |
+
|
| 376 |
+
# The lines below support int4 quantization (int8 will be supported soon). The installation is slower (~10 minutes).
|
| 377 |
+
git clone https://github.com/QwenLM/vllm-gptq
|
| 378 |
+
cd vllm-gptq
|
| 379 |
+
pip install -e .
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
否则请参考vLLM官方的[安装说明](https://docs.vllm.ai/en/latest/getting_started/installation.html),或者我们[vLLM分支仓库(支持量化模型)](https://github.com/QwenLM/vllm-gptq)。
|
| 383 |
+
|
| 384 |
+
Otherwise, please refer to the official vLLM [Installation Instructions](https://docs.vllm.ai/en/latest/getting_started/installation.html), or our [vLLM repo for GPTQ quantization](https://github.com/QwenLM/vllm-gptq).
|
| 385 |
+
<br>
|
| 386 |
+
|
| 387 |
+
## 快速使用(Quickstart)
|
| 388 |
+
|
| 389 |
+
### 使用HuggingFace Transformers进行推理(Inference with Huggingface Transformers)
|
| 390 |
+
|
| 391 |
+
下面我们展示了一个使用Qwen-72B-Chat模型,进行多轮对话交互的样例:
|
| 392 |
+
|
| 393 |
+
We show an example of multi-turn interaction with Qwen-72B-Chat in the following code:
|
| 394 |
+
|
| 395 |
+
```python
|
| 396 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 397 |
+
from transformers.generation import GenerationConfig
|
| 398 |
+
|
| 399 |
+
# Note: The default behavior now has injection attack prevention off.
|
| 400 |
+
tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-72B-Chat"", trust_remote_code=True)
|
| 401 |
+
|
| 402 |
+
# use bf16
|
| 403 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B-Chat"", device_map=""auto"", trust_remote_code=True, bf16=True).eval()
|
| 404 |
+
# use fp16
|
| 405 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B-Chat"", device_map=""auto"", trust_remote_code=True, fp16=True).eval()
|
| 406 |
+
# use cpu only
|
| 407 |
+
# model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B-Chat"", device_map=""cpu"", trust_remote_code=True).eval()
|
| 408 |
+
# use auto mode, automatically select precision based on the device.
|
| 409 |
+
model = AutoModelForCausalLM.from_pretrained(""Qwen/Qwen-72B-Chat"", device_map=""auto"", trust_remote_code=True).eval()
|
| 410 |
+
# NOTE: The above line would require at least 144GB memory in total
|
| 411 |
+
|
| 412 |
+
# Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
|
| 413 |
+
# model.generation_config = GenerationConfig.from_pretrained(""Qwen/Qwen-72B-Chat"", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
|
| 414 |
+
|
| 415 |
+
# 第一轮对话 1st dialogue turn
|
| 416 |
+
response, history = model.chat(tokenizer, ""你好"", history=None)
|
| 417 |
+
print(response)
|
| 418 |
+
# 你好!很高兴为你提供帮助。
|
| 419 |
+
|
| 420 |
+
# 第二轮对话 2nd dialogue turn
|
| 421 |
+
response, history = model.chat(tokenizer, ""给我讲一个年轻人奋斗创业最终取得成功的故事。"", history=history)
|
| 422 |
+
print(response)
|
| 423 |
+
# 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
|
| 424 |
+
# 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。
|
| 425 |
+
# 为了实现这个目标,李明勤奋学习,考上了大学。在大学期间,他积极参加各种创业比赛,获得了不少奖项。他还利用课余时间去实习,积累了宝贵的经验。
|
| 426 |
+
# 毕业后,李明决定开始自己的创业之路。他开始寻找投资机会,但多次都被拒绝了。然而,他并没有放弃。他继续努力,不断改进自己的创业计划,并寻找新的投资机会。
|
| 427 |
+
# 最终,李明成功地获得了一笔投资,开始了自己的创业之路。他成立了一家科技公司,专注于开发新型软件。在他的领导下,公司迅速发展起来,成为了一家成功的科技企业。
|
| 428 |
+
# 李明的成功并不是偶然的。他勤奋、坚韧、勇于冒险,不断学习和改进自己。他的成功也证明了,只要努力奋斗,任何人都有可能取得成功。
|
| 429 |
+
|
| 430 |
+
# 第三轮对话 3rd dialogue turn
|
| 431 |
+
response, history = model.chat(tokenizer, ""给这个故事起一个标题"", history=history)
|
| 432 |
+
print(response)
|
| 433 |
+
# 《奋斗创业:一个年轻人的成功之路》
|
| 434 |
+
|
| 435 |
+
# Qwen-72B-Chat现在可以通过调整系统指令(System Prompt),实现角色扮演,语言风格迁移,任务设定,行为设定等能力。
|
| 436 |
+
# Qwen-72B-Chat can realize role playing, language style transfer, task setting, and behavior setting via the system prompt.
|
| 437 |
+
response, _ = model.chat(tokenizer, ""你好呀"", history=None, system=""请用二次元可爱语气和我说话"")
|
| 438 |
+
print(response)
|
| 439 |
+
# 哎呀,你好哇!是怎么找到人家的呢?是不是被人家的魅力吸引过来的呀~(≧▽≦)/~
|
| 440 |
+
|
| 441 |
+
response, _ = model.chat(tokenizer, ""My colleague works diligently"", history=None, system=""You will write beautiful compliments according to needs"")
|
| 442 |
+
print(response)
|
| 443 |
+
# Your colleague is a shining example of dedication and hard work. Their commitment to their job is truly commendable, and it shows in the quality of their work.
|
| 444 |
+
# They are an asset to the team, and their efforts do not go unnoticed. Keep up the great work!
|
| 445 |
+
```
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
### 使用vLLM和类Transformers接口进行推理(Inference with vLLM and Transformers-like APIs)
|
| 450 |
+
|
| 451 |
+
在根据上方依赖性部分的说明安装vLLM后,可以下载[接口封装代码](https://qianwen-res.oss-cn-beijing.aliyuncs.com/vllm_wrapper.py)到当前文件夹,并执行以下命令进行多轮对话交互。(注意:该方法当前只支持``model.chat()``接口。)
|
| 452 |
+
|
| 453 |
+
After installing vLLM according to the dependency section above, you can download the [wrapper codes](https://qianwen-res.oss-cn-beijing.aliyuncs.com/vllm_wrapper.py) and execute the following commands for multiple rounds of dialogue interaction. (Note: It currently only supports the ``model.chat()`` method.)
|
| 454 |
+
|
| 455 |
+
```python
|
| 456 |
+
from vllm_wrapper import vLLMWrapper
|
| 457 |
+
|
| 458 |
+
model = vLLMWrapper('Qwen/Qwen-72B-Chat', tensor_parallel_size=2)
|
| 459 |
+
# model = vLLMWrapper('Qwen/Qwen-72B-Chat-Int4', tensor_parallel_size=1, dtype=""float16"") # 运行int4模型。 run int4 model.
|
| 460 |
+
|
| 461 |
+
response, history = model.chat(query=""你好"", history=None)
|
| 462 |
+
print(response)
|
| 463 |
+
response, history = model.chat(query=""给我讲一个年轻人奋斗创业最终取得成功的故事。"", history=history)
|
| 464 |
+
print(response)
|
| 465 |
+
response, history = model.chat(query=""给这个故事起一个标题"", history=history)
|
| 466 |
+
print(response)
|
| 467 |
+
```
|
| 468 |
+
|
| 469 |
+
### 使用vLLM和类OpenAI接口进行推理(Inference with vLLM and OpenAI-like API)
|
| 470 |
+
|
| 471 |
+
请参考我们GitHub repo中[vLLM部署](https://github.com/QwenLM/Qwen#vllm)和[OpenAI接口使用](https://github.com/QwenLM/Qwen#openai-api)两个部分的介绍。
|
| 472 |
+
|
| 473 |
+
Please refer to the introduction of [vLLM deployment](https://github.com/QwenLM/Qwen#vllm) and [OpenAI interface usage](https://github.com/QwenLM/Qwen#openai-api) in our GitHub repo.
|
| 474 |
+
|
| 475 |
+
如果使用2xA100-80G进行部署,可以运行以下代码:
|
| 476 |
+
|
| 477 |
+
If deploying with 2xA100-80G, you can run the following commands:
|
| 478 |
+
|
| 479 |
+
```bash
|
| 480 |
+
python -m fastchat.serve.controller
|
| 481 |
+
python -m fastchat.serve.vllm_worker --model-path Qwen/Qwen-72B-Chat --trust-remote-code --tensor-parallel-size 2 --gpu-memory-utilization 0.98 --dtype bfloat16
|
| 482 |
+
# python -m fastchat.serve.vllm_worker --model-path Qwen/Qwen-72B-Chat-Int4 --trust-remote-code --dtype float16 # 运行int4模型。 run int4 model.
|
| 483 |
+
python -m fastchat.serve.openai_api_server --host localhost --port 8000
|
| 484 |
+
```
|
| 485 |
+
|
| 486 |
+
注意需要``--gpu-memory-utilization 0.98``参数避免OOM问题。
|
| 487 |
+
|
| 488 |
+
Note that the ``--gpu-memory-utilization 0.98`` parameter is required to avoid OOM problems.
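As an illustration (this snippet is not part of the original card), once the FastChat server above is running, the OpenAI-compatible endpoint can be queried with the `openai` Python package; the sketch below assumes `openai<1.0` and that the model is registered under the name `Qwen-72B-Chat`:

```python
# Illustrative client sketch (assumption: openai<1.0 and the FastChat server above is running).
import openai

openai.api_base = ""http://localhost:8000/v1""  # FastChat OpenAI-compatible server
openai.api_key = ""none""                        # the local server does not check the key

response = openai.ChatCompletion.create(
    model=""Qwen-72B-Chat"",                     # assumed registered model name
    messages=[{""role"": ""user"", ""content"": ""你好""}],
)
print(response.choices[0].message.content)
```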
|
| 489 |
+
|
| 490 |
+
<br>
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
|
| 494 |
+
|
| 495 |
+
For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen).
|
| 496 |
+
<br>
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
## 量化 (Quantization)
|
| 500 |
+
|
| 501 |
+
### 用法 (Usage)
|
| 502 |
+
|
| 503 |
+
以下我们提供示例说明如何使用Int4/Int8量化模型。在开始使用前,请先保证满足要求(如torch 2.0及以上,transformers版本为4.32.0及以上,等等),并安装所需安装包:
|
| 504 |
+
|
| 505 |
+
Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:
|
| 506 |
+
|
| 507 |
+
```bash
|
| 508 |
+
pip install auto-gptq optimum
|
| 509 |
+
```
|
| 510 |
+
|
| 511 |
+
如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。
|
| 512 |
+
|
| 513 |
+
If you run into problems installing `auto-gptq`, we advise you to check the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a suitable pre-built wheel.
|
| 514 |
+
|
| 515 |
+
> 注意:预编译的`auto-gptq`版本对`torch`版本及其CUDA版本要求严格。同时,由于
|
| 516 |
+
> 其近期更新,你可能会遇到`transformers`、`optimum`或`peft`抛出的版本错误。
|
| 517 |
+
> 我们建议使用符合以下要求的最新版本:
|
| 518 |
+
> - torch==2.1 auto-gptq>=0.5.1 transformers>=4.35.0 optimum>=1.14.0 peft>=0.6.1
|
| 519 |
+
> - torch>=2.0,<2.1 auto-gptq<0.5.0 transformers<4.35.0 optimum<1.14.0 peft>=0.5.0,<0.6.0
|
| 520 |
+
> Note: The pre-compiled `auto-gptq` packages strongly depend on the version of `torch` and its CUDA version. Moreover, due to recent updates,
|
| 521 |
+
> you may also encounter unsupported version errors from `transformers`, `optimum`, or `peft`.
|
| 522 |
+
> We recommend using the latest versions meeting the following requirements:
|
| 523 |
+
> - torch==2.1 auto-gptq>=0.5.1 transformers>=4.35.0 optimum>=1.14.0 peft>=0.6.1
|
| 524 |
+
> - torch>=2.0,<2.1 auto-gptq<0.5.0 transformers<4.35.0 optimum<1.14.0 peft>=0.5.0,<0.6.0
|
| 525 |
+
|
| 526 |
+
随后即可使用和上述一致的用法调用量化模型:
|
| 527 |
+
|
| 528 |
+
Then you can load the quantized model easily and run inference just as usual:
|
| 529 |
+
|
| 530 |
+
```python
|
| 531 |
+
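# The tokenizer is assumed to have been loaded as in the Quickstart section above.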
model = AutoModelForCausalLM.from_pretrained(
|
| 532 |
+
""Qwen/Qwen-72B-Chat-Int4"",
|
| 533 |
+
device_map=""auto"",
|
| 534 |
+
trust_remote_code=True
|
| 535 |
+
).eval()
|
| 536 |
+
response, history = model.chat(tokenizer, ""你好"", history=None)
|
| 537 |
+
```
|
| 538 |
+
|
| 539 |
+
注意:使用vLLM运行量化模型需安装我们[vLLM分支仓库](https://github.com/QwenLM/vllm-gptq)。暂不支持int8模型,近期将更新。
|
| 540 |
+
|
| 541 |
+
Note: To run quantized models with vLLM, you need to install our [vLLM fork](https://github.com/QwenLM/vllm-gptq). The Int8 model is not supported for the time being; we will add support soon.
|
| 542 |
+
|
| 543 |
+
### 效果评测 (Performance of Quantized Models)
|
| 544 |
+
|
| 545 |
+
我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),结果如下所示:
|
| 546 |
+
|
| 547 |
+
We illustrate the zero-shot performance of the BF16, Int8, and Int4 models on these benchmarks. Results are shown below:
|
| 548 |
+
|
| 549 |
+
| Quantization | MMLU | CEval (val) | GSM8K | Humaneval |
|
| 550 |
+
|--------------|:----:|:-----------:|:-----:|:---------:|
|
| 551 |
+
| BF16 | 74.4 | 80.1 | 76.4 | 64.6 |
|
| 552 |
+
| Int8 | 73.5 | 80.1 | 73.5 | 62.2 |
|
| 553 |
+
| Int4 | 73.4 | 80.1 | 75.3 | 61.6 |
|
| 554 |
+
|
| 555 |
+
### 推理速度及显存使用 (Inference Speed & GPU Memory Usage)
|
| 556 |
+
|
| 557 |
+
我们测算了不同精度模型、不同FlashAttn库版本、以及是否使用vLLM的情况下,模型在不同输入长度下生成2048词的平均推理速度以及显存使用。
|
| 558 |
+
|
| 559 |
+
We measured the average inference speed and GPU memory usage of generating 2048 tokens across several settings, including input lengths, quantization levels, versions of flash-attention, and whether vLLM is used.
|
| 560 |
+
|
| 561 |
+
| Quantization | Setting | # of A100-80G GPUs | Context Length | Generation Length | Speed (Tokens/s) | Total GPU Memory Usage |
|
| 562 |
+
| ------------- | :---------------: | :----------------: | :-------------: | :---------------: | :---------------:| :---------------------:|
|
| 563 |
+
| BF16 | HF + FlashAttn-v2 | 2 | 1 | 2048 | 8.48 | 144.69GB |
|
| 564 |
+
| BF16 | HF + FlashAttn-v1 | 2 | 1 | 2048 | 8.31 | 144.69GB |
|
| 565 |
+
| BF16 | HF + No FlashAttn | 2 | 1 | 2048 | 7.89 | 144.69GB |
|
| 566 |
+
| BF16 | vLLM | 2 | 1 | 2048 | 17.60 | Pre-Allocated* |
|
| 567 |
+
| BF16 | vLLM | 4 | 1 | 2048 | 26.16 | Pre-Allocated* |
|
| 568 |
+
| BF16 | HF + FlashAttn-v2 | 4 | 6144 | 2048 | 5.37 | 181.47GB |
|
| 569 |
+
| BF16 | HF + FlashAttn-v1 | 4 | 6144 | 2048 | 4.96 | 181.47GB |
|
| 570 |
+
| BF16 | HF + No FlashAttn | 4 | 6144 | 2048 | 4.72 | 202.74GB |
|
| 571 |
+
| BF16 | vLLM | 4 | 6144 | 2048 | 24.41 | Pre-Allocated* |
|
| 572 |
+
| BF16 | vLLM | 4 | 14336 | 2048 | 21.24 | Pre-Allocated* |
|
| 573 |
+
| BF16 | vLLM | 4 | 30720 | 2048 | 17.55 | Pre-Allocated* |
|
| 574 |
+
| Int8 | HF + FlashAttn-v2 | 2 | 1 | 2048 | 9.05 | 81.27GB |
|
| 575 |
+
| Int8 | HF + FlashAttn-v1 | 2 | 1 | 2048 | 8.97 | 81.27GB |
|
| 576 |
+
| Int8 | HF + No FlashAttn | 2 | 1 | 2048 | 8.32 | 81.27GB |
|
| 577 |
+
| Int8 | HF + FlashAttn-v2 | 3 | 6144 | 2048 | 5.76 | 118.06GB |
|
| 578 |
+
| Int8 | HF + FlashAttn-v1 | 3 | 6144 | 2048 | 5.72 | 118.06GB |
|
| 579 |
+
| Int8 | HF + No FlashAttn | 2 | 6144 | 2048 | 4.50 | 129.83GB |
|
| 580 |
+
| Int8 | HF + FlashAttn-v2 | 4 | 14336 | 2048 | 3.44 | 180.44GB |
|
| 581 |
+
| Int8 | HF + FlashAttn-v1 | 4 | 14336 | 2048 | 3.19 | 180.44GB |
|
| 582 |
+
| Int8 | HF + No FlashAttn | 4 | 14336 | 2048 | OOM | OOM |
|
| 583 |
+
| Int4 | HF + FlashAttn-v2 | 1 | 1 | 2048 | 11.67 | 48.86GB |
|
| 584 |
+
| Int4 | HF + FlashAttn-v1 | 1 | 1 | 2048 | 11.27 | 48.86GB |
|
| 585 |
+
| Int4 | HF + No FlashAttn | 1 | 1 | 2048 | 11.32 | 48.86GB |
|
| 586 |
+
| Int4 | vLLM | 1 | 1 | 2048 | 14.63 | Pre-Allocated* |
|
| 587 |
+
| Int4 | vLLM | 2 | 1 | 2048 | 20.76 | Pre-Allocated* |
|
| 588 |
+
| Int4 | vLLM | 4 | 1 | 2048 | 27.19 | Pre-Allocated* |
|
| 589 |
+
| Int4 | HF + FlashAttn-v2 | 2 | 6144 | 2048 | 6.75 | 85.99GB |
|
| 590 |
+
| Int4 | HF + FlashAttn-v1 | 2 | 6144 | 2048 | 6.32 | 85.99GB |
|
| 591 |
+
| Int4 | HF + No FlashAttn | 2 | 6144 | 2048 | 5.97 | 88.30GB |
|
| 592 |
+
| Int4 | vLLM | 2 | 6144 | 2048 | 18.07 | Pre-Allocated* |
|
| 593 |
+
| Int4 | vLLM | 4 | 6144 | 2048 | 24.56 | Pre-Allocated* |
|
| 594 |
+
| Int4 | HF + FlashAttn-v2 | 3 | 14336 | 2048 | 4.18 | 148.73GB |
|
| 595 |
+
| Int4 | HF + FlashAttn-v1 | 3 | 14336 | 2048 | 3.72 | 148.73GB |
|
| 596 |
+
| Int4 | HF + No FlashAttn | 3 | 14336 | 2048 | OOM | OOM |
|
| 597 |
+
| Int4 | vLLM | 2 | 14336 | 2048 | 14.51 | Pre-Allocated* |
|
| 598 |
+
| Int4 | vLLM | 4 | 14336 | 2048 | 19.28 | Pre-Allocated* |
|
| 599 |
+
| Int4 | vLLM | 4 | 30720 | 2048 | 16.93 | Pre-Allocated* |
|
| 600 |
+
|
| 601 |
+
\* vLLM会提前预分配显存,因此无法探测最大显存使用情况。HF是指使用Huggingface Transformers库进行推理。
|
| 602 |
+
|
| 603 |
+
\* vLLM pre-allocates GPU memory, so we cannot detect the maximum usage. HF refers to using the Huggingface Transformers library for inference.
|
| 604 |
+
|
| 605 |
+
HuggingFace Transformers的性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。评测使用A100-SXM4-80G GPU,使用PyTorch 2.0.1 (Huggingface Transformers) / PyTorch 2.1.0 (vLLM)和CUDA 11.8。
|
| 606 |
+
|
| 607 |
+
The speed and memory profiling of HuggingFace Transformers are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py). The profiling runs on A100-SXM4-80G GPUs with PyTorch 2.0.1 (for Huggingface Transformers) / PyTorch 2.1.0 (for vLLM) and CUDA 11.8.
|
| 608 |
+
<br>
|
| 609 |
+
|
| 610 |
+
## 模型细节(Model)
|
| 611 |
+
|
| 612 |
+
与Qwen-72B预训练模型相同,Qwen-72B-Chat模型规模基本情况如下所示
|
| 613 |
+
|
| 614 |
+
The details of the model architecture of Qwen-72B-Chat are listed as follows
|
| 615 |
+
|
| 616 |
+
| Hyperparameter | Value |
|
| 617 |
+
|:----------------|:-------|
|
| 618 |
+
| n_layers | 80 |
|
| 619 |
+
| n_heads | 64 |
|
| 620 |
+
| d_model | 8192 |
|
| 621 |
+
| vocab size | 151851 |
|
| 622 |
+
| sequence length | 32768 |
|
| 623 |
+
|
| 624 |
+
在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
|
| 625 |
+
即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
|
| 626 |
+
|
| 627 |
+
在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-72B-Chat使用了约15万token大小的词表。
|
| 628 |
+
该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。
|
| 629 |
+
词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。
|
| 630 |
+
|
| 631 |
+
For position encoding, FFN activation function, and normalization calculation methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration).
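For intuition, below is a minimal, illustrative PyTorch sketch of RMSNorm and a SwiGLU feed-forward block; it is a simplified rendition, not the exact implementation in `modeling_qwen.py`:

```python
import torch
import torch.nn.functional as F

# Simplified, illustrative versions of the components named above.

def rms_norm(x, weight, eps=1e-6):
    # RMSNorm: rescale by the root-mean-square of the activations (no mean subtraction).
    rms = x.pow(2).mean(dim=-1, keepdim=True).add(eps).rsqrt()
    return x * rms * weight

def swiglu_ffn(x, w_gate, w_up, w_down):
    # SwiGLU feed-forward: SiLU(x @ W_gate) gated by (x @ W_up), projected back with W_down.
    return (F.silu(x @ w_gate) * (x @ w_up)) @ w_down
```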
|
| 632 |
+
|
| 633 |
+
For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-72B-Chat uses a vocabulary of over 150K tokens.
|
| 634 |
+
It prioritizes efficient encoding of Chinese, English, and code data, and is also friendlier to many other languages, enabling users to directly enhance capabilities for some languages without expanding the vocabulary.
|
| 635 |
+
It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
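As an illustrative check of the behaviour described above (assuming access to the Hugging Face Hub), the tokenizer can be loaded and inspected directly:

```python
from transformers import AutoTokenizer

# Illustrative inspection of the tokenizer; exact outputs may differ.
tokenizer = AutoTokenizer.from_pretrained(""Qwen/Qwen-72B-Chat"", trust_remote_code=True)
print(len(tokenizer))                 # vocabulary size, on the order of ~152K entries
print(tokenizer.tokenize(""12345""))   # digits should come out as single-digit tokens
```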
|
| 636 |
+
<br>
|
| 637 |
+
|
| 638 |
+
## 评测效果(Evaluation)
|
| 639 |
+
|
| 640 |
+
对于Qwen-72B-Chat模型,我们同样评测了常规的中文理解(C-Eval)、英文理解(MMLU)、代码(HumanEval)和数学(GSM8K)等权威任务,同时包含了长序列任务的评测结果。由于Qwen-72B-Chat模型经过对齐后,激发了较强的外部系统调用能力,我们还进行了工具使用能力方面的评测。
|
| 641 |
+
|
| 642 |
+
提示:由于硬件和框架造成的舍入误差,复现结果如有波动属于正常现象。
|
| 643 |
+
|
| 644 |
+
For Qwen-72B-Chat, we also evaluate the model on C-Eval, MMLU, HumanEval, GSM8K, etc., as well as on benchmarks for long-context understanding and tool usage.
|
| 645 |
+
|
| 646 |
+
Note: Due to rounding errors caused by hardware and framework, differences in reproduced results are possible.
|
| 647 |
+
|
| 648 |
+
### 中文评测(Chinese Evaluation)
|
| 649 |
+
|
| 650 |
+
#### C-Eval
|
| 651 |
+
|
| 652 |
+
在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-72B-Chat模型的0-shot & 5-shot准确率
|
| 653 |
+
|
| 654 |
+
We demonstrate the 0-shot & 5-shot accuracy of Qwen-72B-Chat on C-Eval validation set
|
| 655 |
+
|
| 656 |
+
| Model | Avg. Acc. |
|
| 657 |
+
|:--------------------------------:|:---------:|
|
| 658 |
+
| LLaMA2-7B-Chat | 31.9 |
|
| 659 |
+
| LLaMA2-13B-Chat | 36.2 |
|
| 660 |
+
| LLaMA2-70B-Chat | 44.3 |
|
| 661 |
+
| ChatGPT3.5 | 52.5 |
|
| 662 |
+
| ChatGPT4 | 69.9 |
|
| 663 |
+
| Yi-34B-Chat (0-shot) | 77.0 |
|
| 664 |
+
| Yi-34B-Chat (5-shot) | 78.5 |
|
| 665 |
+
| Qwen-7B-Chat (original) (0-shot) | 54.2 |
|
| 666 |
+
| **Qwen-7B-Chat (0-shot)** | 59.7 |
|
| 667 |
+
| **Qwen-7B-Chat (5-shot)** | 59.3 |
|
| 668 |
+
| **Qwen-14B-Chat (0-shot)** | 69.8 |
|
| 669 |
+
| **Qwen-14B-Chat (5-shot)** | 71.7 |
|
| 670 |
+
| **Qwen-72B-Chat (0-shot)** | 80.1 |
|
| 671 |
+
| **Qwen-72B-Chat (5-shot)** | 82.9 |
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
C-Eval测试集上,Qwen-72B-Chat模型的zero-shot准确率结果如下:
|
| 675 |
+
|
| 676 |
+
The zero-shot accuracy of Qwen-72B-Chat on C-Eval testing set is provided below:
|
| 677 |
+
|
| 678 |
+
| Model | Avg. | STEM | Social Sciences | Humanities | Others |
|
| 679 |
+
| :---------------------- | :------: | :--: | :-------------: | :--------: | :----: |
|
| 680 |
+
| Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 |
|
| 681 |
+
| **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 |
|
| 682 |
+
| **Qwen-14B-Chat** | 69.1 | 65.1 | 80.9 | 71.2 | 63.4 |
|
| 683 |
+
| **Qwen-72B-Chat** | 79.5 | 74.5 | 89.1 | 81.2 | 78.1 |
|
| 684 |
+
|
| 685 |
+
### 英文评测(English Evaluation)
|
| 686 |
+
|
| 687 |
+
#### MMLU
|
| 688 |
+
|
| 689 |
+
[MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-72B-Chat模型的 0-shot & 5-shot 准确率如下,效果在同类对齐模型中同样表现较优。
|
| 690 |
+
|
| 691 |
+
The 0-shot & 5-shot accuracy of Qwen-72B-Chat on MMLU is provided below.
|
| 692 |
+
The performance of Qwen-72B-Chat remains among the best of human-aligned models of comparable size.
|
| 693 |
+
|
| 694 |
+
| Model | Avg. Acc. |
|
| 695 |
+
|:--------------------------------:|:---------:|
|
| 696 |
+
| LLaMA2-7B-Chat | 46.2 |
|
| 697 |
+
| LLaMA2-13B-Chat | 54.6 |
|
| 698 |
+
| LLaMA2-70B-Chat | 63.8 |
|
| 699 |
+
| Yi-34B-Chat (0-shot) | 67.6 |
|
| 700 |
+
| Yi-34B-Chat (5-shot) | 73.4 |
|
| 701 |
+
| ChatGPT3.5 | 69.1 |
|
| 702 |
+
| ChatGPT4 | 83.0 |
|
| 703 |
+
| Qwen-7B-Chat (original) (0-shot) | 53.9 |
|
| 704 |
+
| **Qwen-7B-Chat (0-shot)** | 55.8 |
|
| 705 |
+
| **Qwen-7B-Chat (5-shot)** | 57.0 |
|
| 706 |
+
| **Qwen-14B-Chat (0-shot)** | 64.6 |
|
| 707 |
+
| **Qwen-14B-Chat (5-shot)** | 66.5 |
|
| 708 |
+
| **Qwen-72B-Chat (0-shot)** | 74.3 |
|
| 709 |
+
| **Qwen-72B-Chat (5-shot)** | 75.0 |
|
| 710 |
+
|
| 711 |
+
### 代码评测(Coding Evaluation)
|
| 712 |
+
|
| 713 |
+
Qwen-72B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pass@1效果如下
|
| 714 |
+
|
| 715 |
+
The zero-shot Pass@1 of Qwen-72B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below
|
| 716 |
+
|
| 717 |
+
| Model | Pass@1 |
|
| 718 |
+
|:-----------------------:|:--------:|
|
| 719 |
+
| LLaMA2-7B-Chat | 12.2 |
|
| 720 |
+
| LLaMA2-13B-Chat | 18.9 |
|
| 721 |
+
| LLaMA2-70B-Chat | 32.3 |
|
| 722 |
+
| Yi-34B-Chat | 33.5 |
|
| 723 |
+
| ChatGPT3.5 | 73.2 |
|
| 724 |
+
| ChatGPT4 | 86.6 |
|
| 725 |
+
| Qwen-7B-Chat (original) | 24.4 |
|
| 726 |
+
| **Qwen-7B-Chat** | 37.2 |
|
| 727 |
+
| **Qwen-14B-Chat** | 43.9 |
|
| 728 |
+
| **Qwen-72B-Chat** | 64.6 |
|
| 729 |
+
|
| 730 |
+
### 数学评测(Mathematics Evaluation)
|
| 731 |
+
|
| 732 |
+
在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-72B-Chat的准确率结果如下
|
| 733 |
+
|
| 734 |
+
The accuracy of Qwen-72B-Chat on GSM8K is shown below
|
| 735 |
+
|
| 736 |
+
| Model | Acc. |
|
| 737 |
+
|:--------------------------------:|:--------:|
|
| 738 |
+
| LLaMA2-7B-Chat | 26.3 |
|
| 739 |
+
| LLaMA2-13B-Chat | 37.1 |
|
| 740 |
+
| LLaMA2-70B-Chat | 59.3 |
|
| 741 |
+
| Yi-34B-Chat | 71.6 |
|
| 742 |
+
| ChatGPT3.5 | 73.2 |
|
| 743 |
+
| ChatGPT4 | 91.4 |
|
| 744 |
+
| Qwen-7B-Chat (original) (0-shot) | 41.1 |
|
| 745 |
+
| **Qwen-7B-Chat (0-shot)** | 50.3 |
|
| 746 |
+
| **Qwen-7B-Chat (8-shot)** | 54.1 |
|
| 747 |
+
| **Qwen-14B-Chat (0-shot)** | 60.1 |
|
| 748 |
+
| **Qwen-14B-Chat (8-shot)** | 59.3 |
|
| 749 |
+
| **Qwen-72B-Chat (0-shot)** | 76.4 |
|
| 750 |
+
| **Qwen-72B-Chat (8-shot)** | 75.7 |
|
| 751 |
+
|
| 752 |
+
### 长序列评测(Long-Context Understanding)
|
| 753 |
+
|
| 754 |
+
Qwen-72B-Chat支持最长32k的上下文长度,在[L-Eval](https://arxiv.org/abs/2307.11088)客观题的评分结果如下:
|
| 755 |
+
|
| 756 |
+
Qwen-72B-Chat supports context lengths of up to 32k. The scores of [L-Eval](https://arxiv.org/abs/2307.11088) (closed-ended tasks) are as follows:
|
| 757 |
+
|
| 758 |
+
| Model | Average | Coursera | GSM | QuALITY | TOEFL | CodeU | SFcition |
|
| 759 |
+
|:------------------|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
|
| 760 |
+
| ChatGPT-3.5-16k | 60.73 | **63.51** | **84.00** | 61.38 | 78.43 | **12.22** | 64.84 |
|
| 761 |
+
| **Qwen-72B-Chat** | **62.30** | 58.13 | 76.00 | **77.22** | **86.24** | 6.66 | **69.53** |
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
我们进一步进行了“大海捞针”实验(想法来自于[@Greg Kamradt](https://twitter.com/GregKamradt/status/1727018183608193393)),测试模型在不同长度的输入下,是否能检索到文章不同位置的信息,结果如下:
|
| 765 |
+
|
| 766 |
+
We conducted the ""needle in a haystack"" experiment (the idea came from [@Greg Kamradt](https://twitter.com/GregKamradt/status/1727018183608193393)) to test whether the model can retrieve information at different positions in inputs of different lengths. The result is as follows:
|
| 767 |
+
|
| 768 |
+

|
| 769 |
+
|
| 770 |
+
以上结果说明,Qwen-72B-Chat能够准确检索到32k以内的输入长度中放在各种位置的信息,证明了其具有优秀的长文本处理能力。
|
| 771 |
+
|
| 772 |
+
The above results show that Qwen-72B-Chat can accurately retrieve information placed in various positions within an input length of 32k, proving its excellent long text understanding capabilities.
|
| 773 |
+
|
| 774 |
+
## FAQ
|
| 775 |
+
|
| 776 |
+
如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
|
| 777 |
+
|
| 778 |
+
If you run into problems, please refer to the [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and existing issues first to search for a solution before opening a new issue.
|
| 779 |
+
<br>
|
| 780 |
+
|
| 781 |
+
## 引用 (Citation)
|
| 782 |
+
|
| 783 |
+
如果你觉得我们的工作对你有帮助,欢迎引用!
|
| 784 |
+
|
| 785 |
+
If you find our work helpful, feel free to cite it.
|
| 786 |
+
|
| 787 |
+
```
|
| 788 |
+
@article{qwen,
|
| 789 |
+
title={Qwen Technical Report},
|
| 790 |
+
author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
|
| 791 |
+
journal={arXiv preprint arXiv:2309.16609},
|
| 792 |
+
year={2023}
|
| 793 |
+
}
|
| 794 |
+
```
|
| 795 |
+
<br>
|
| 796 |
+
|
| 797 |
+
## 使用协议(License Agreement)
|
| 798 |
+
|
| 799 |
+
我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,欢迎填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-72B-Chat)申请。
|
| 800 |
+
|
| 801 |
+
Our code and checkpoints are open for research purposes, and commercial use is also permitted. Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-72B-Chat) to apply.
|
| 802 |
+
<br>
|
| 803 |
+
|
| 804 |
+
## 联系我们(Contact Us)
|
| 805 |
+
|
| 806 |
+
如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
|
| 807 |
+
|
| 808 |
+
If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.","{""id"": ""Qwen/Qwen-72B-Chat"", ""author"": ""Qwen"", ""sha"": ""307fccc1a45211b087ab294a8d291f7632259322"", ""last_modified"": ""2024-10-08 05:20:43+00:00"", ""created_at"": ""2023-11-29 09:37:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1357, ""downloads_all_time"": null, ""likes"": 155, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen"", ""text-generation"", ""custom_code"", ""zh"", ""en"", ""arxiv:2309.16609"", ""arxiv:2305.08322"", ""arxiv:2009.03300"", ""arxiv:2307.11088"", ""base_model:Qwen/Qwen-72B"", ""base_model:finetune:Qwen/Qwen-72B"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen-72B\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: tongyi-qianwen\nlicense_link: https://huggingface.co/Qwen/Qwen-72B-Chat/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false\nnew_version: Qwen/Qwen1.5-72B-Chat"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": {""architectures"": [""QWenLMHeadModel""], ""auto_map"": {""AutoConfig"": ""configuration_qwen.QWenConfig"", ""AutoModelForCausalLM"": ""modeling_qwen.QWenLMHeadModel""}, ""model_type"": ""qwen"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/qwen_72b_needle_in_a_haystack.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/wechat.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_256.cpp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cache_autogptq_cuda_kernel_256.cu', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cpp_kernels.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00082.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00040-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00056-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00057-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00058-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00059-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00060-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00061-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00062-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00063-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00064-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00065-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00066-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00067-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00068-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00069-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00070-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00071-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00072-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00073-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00074-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00075-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00076-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00077-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00078-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00079-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00080-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00081-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00082-of-00082.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen_generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_qwen.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""KBaba7/Quant"", ""Justinrune/LLaMA-Factory"", ""yhavinga/dutch-tokenizer-arena"", ""kenken999/fastapi_django_main_live"", ""bhaskartripathi/LLM_Quantization"", ""officialhimanshu595/llama-factory"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""Zulelee/langchain-chatchat"", ""ruslanmv/convert_to_gguf"", ""IS2Lab/S-Eval"", ""pvanand/general_chat"", ""pvanand/audio_chat"", ""msun415/Llamole"", ""ogegadavis254/ibuka"", ""hermi612/Medical-Chatbot"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 72287920128}, ""total"": 72287920128}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-08 05:20:43+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen-72B\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: tongyi-qianwen\nlicense_link: https://huggingface.co/Qwen/Qwen-72B-Chat/blob/main/LICENSE\npipeline_tag: text-generation\ntags:\n- qwen\ninference: false\nnew_version: Qwen/Qwen1.5-72B-Chat"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_qwen.QWenLMHeadModel"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6567064377d8a948ac96d37a"", ""modelId"": ""Qwen/Qwen-72B-Chat"", ""usedStorage"": 144575911576}",1,,0,,0,"https://huggingface.co/mradermacher/Qwen-72B-Chat-GGUF, https://huggingface.co/mradermacher/Qwen-72B-Chat-i1-GGUF",2,,0,"FallnAI/Quantize-HF-Models, IS2Lab/S-Eval, Justinrune/LLaMA-Factory, K00B404/LLM_Quantization, KBaba7/Quant, Zulelee/langchain-chatchat, bhaskartripathi/LLM_Quantization, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen-72B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen-72B-Chat%5D(%2FQwen%2FQwen-72B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, officialhimanshu595/llama-factory, pvanand/general_chat, yhavinga/dutch-tokenizer-arena",13
|
Qwen2-72B-Instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
SillyTavern-Presets_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
Virt-io/SillyTavern-Presets,"---
|
| 3 |
+
tags:
|
| 4 |
+
- roleplay
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
> [!IMPORTANT]
|
| 8 |
+
> v1.9 is still recommended<br>
|
| 9 |
+
> v2.0 is similar to v1.9 | json is a master import.<br>
|
| 10 |
+
> **Samplers are just for messing around**<br>
|
| 11 |
+
> **Turn on trim if you like it; I just suffer from FOMO.**<br>
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
> [!IMPORTANT]
|
| 15 |
+
> Thanks to:<br>
|
| 16 |
+
> [SerialKicked](https://huggingface.co/SerialKicked) for [fixing context](https://huggingface.co/Virt-io/SillyTavern-Presets/discussions/3)<br>
|
| 17 |
+
> [saishf](https://huggingface.co/saishf) for testing all the bad versions<br>
|
| 18 |
+
> [Lewdiculous](https://huggingface.co/Lewdiculous) for testing and quantizing<br>
|
| 19 |
+
> [Herman555](https://huggingface.co/Herman555) for reminding me that [some models need a jailbreak](https://huggingface.co/Virt-io/SillyTavern-Presets/discussions/4)<br>
|
| 20 |
+
> [Clevyby](https://huggingface.co/Clevyby) for sharing their [sampler knowledge](https://huggingface.co/LWDCLS/LLM-Discussions/discussions/2#663b90a7a55b06346368adae)<br>
|
| 21 |
+
> [shrinkedd](https://www.reddit.com/r/SillyTavernAI/comments/1ca4xo8/ive_thought_of_a_way_to_decrease_chances_of/) for ideas<br>
|
| 22 |
+
|
| 23 |
+
### SillyTavern Presets
|
| 24 |
+
|
| 25 |
+
# Usage
|
| 26 |
+
|
| 27 |
+
Make sure to grab both context and instruct templates.
|
| 28 |
+
It should look something like this.
|
| 29 |
+
|
| 30 |
+
<img src=""https://huggingface.co/Virt-io/SillyTavern-Presets/resolve/main/Images/Silly_Tavern_preset.png"">
|
| 31 |
+
|
| 32 |
+
When using these presets, you must set **Example Messages Behavior: Never include examples**, otherwise the examples will be sent twice.
|
| 33 |
+
|
| 34 |
+
<img src=""https://huggingface.co/Virt-io/SillyTavern-Presets/resolve/main/Images/ExampleMessages.png"">
|
| 35 |
+
|
| 36 |
+
The reason for this is that I explicitly set them to be sent. The default behavior is for them to simply be appended to the end of the context prompt.
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Character Cards
|
| 40 |
+
|
| 41 |
+
**The following is just personal preference. However, it is recommended for a better experience.**
|
| 42 |
+
|
| 43 |
+
<img src=""https://huggingface.co/Virt-io/SillyTavern-Presets/resolve/main/Images/Character_Cards_01.png"">
|
| 44 |
+
|
| 45 |
+
> [!IMPORTANT]
|
| 46 |
+
> **Create a new neutral persona(USER_01)**<br>
|
| 47 |
+
> **For scenario, use a really vague description. This is to prevent the LLM from locking in. (Unless you want that)**<br>
|
| 48 |
+
> **I am currently running https://github.com/gaffe-buck/tavern-v2-character-creator inside a container**<br>
|
| 49 |
+
|
| 50 |
+
**Choosing a mode**
|
| 51 |
+
|
| 52 |
+
Prepend one of the following, before your request.
|
| 53 |
+
|
| 54 |
+
```
|
| 55 |
+
> Text Editor
|
| 56 |
+
|
| 57 |
+
> Character Creator
|
| 58 |
+
|
| 59 |
+
> Flexible P-list Formatter
|
| 60 |
+
|
| 61 |
+
> Ali-chat Generator
|
| 62 |
+
|
| 63 |
+
> Opening Scenario Writer
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
Example:
|
| 67 |
+
```
|
| 68 |
+
> Text Editor
|
| 69 |
+
|
| 70 |
+
---
|
| 71 |
+
|
| 72 |
+
Re-write the scenario in a dark fantasy philosophical style.
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
Example:
|
| 76 |
+
```
|
| 77 |
+
> Opening Scenario Writer
|
| 78 |
+
|
| 79 |
+
Create an opening scene for Char, Char enters a coffee shop.
|
| 80 |
+
|
| 81 |
+
> Text Editor
|
| 82 |
+
|
| 83 |
+
Re-write Char's opening scenario, in a dark comedy style.
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
<img src=""https://huggingface.co/Virt-io/SillyTavern-Presets/resolve/main/Images/Character_Cards_02.png"">
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Samplers
|
| 90 |
+
|
| 91 |
+
**I have decided to remove old samplers and only keep basic presets; I want people to play around and find what works best for them. Change context to your desired context length.**
|
| 92 |
+
|
| 93 |
+
[SillyTavern Docs](https://docs.sillytavern.app/usage/common-settings/#sampler-parameters)
|
| 94 |
+
|
| 95 |
+
**Temperature**
|
| 96 |
+
Feel free to play with this one, lower values are more grounded.
|
| 97 |
+
|
| 98 |
+
**Min-P**
|
| 99 |
+
Higher values chop off more probabilities.
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
Values between 0.025 - 0.10 are good, personally I would use 0.075 or lower.
|
| 103 |
+
|
| 104 |
+
**Repetition Penalty**
|
| 105 |
+
Tries to decrease repetition.
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
Do not set it higher than 1.2.
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
1.05 - 1.15 seem to work fine.
|
| 112 |
+
|
| 113 |
+
**Rep Pen Range**
|
| 114 |
+
The range of tokens which Repetition Penalty can see.
|
| 115 |
+
I have it set to 2048.
|
| 116 |
+
|
| 117 |
+
**Frequency Penalty**
|
| 118 |
+
Decreases repetition.
|
| 119 |
+
|
| 120 |
+
**Presence Penalty**
|
| 121 |
+
Increases word variety.
|
| 122 |
+
|
| 123 |
+
**Dynamic Temperature**
|
| 124 |
+
Min and Max temps, free to change as desired.
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
Exponent, do not set Exponent higher than the default of 1.
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
You might want to try setting it lower than 1; this pushes lower probabilities higher.
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
When setting exponent lower than 1, set Min-P a little higher (0.075)
|
| 134 |
+
|
| 135 |
+
**Smooth Sampling**
|
| 136 |
+
This one is great; it smooths out probabilities.
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
Lower is more diverse.
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
Recommended range 0.1 - 0.3
|
| 143 |
+
","{""id"": ""Virt-io/SillyTavern-Presets"", ""author"": ""Virt-io"", ""sha"": ""9a161626d7ad3a19f43ab32503dc95a02a8cf1f9"", ""last_modified"": ""2024-09-25 19:05:29+00:00"", ""created_at"": ""2024-03-22 06:05:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 314, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""roleplay"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""tags:\n- roleplay"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cards/Blobby.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cards/P-list_Assitant.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Character_Cards_01.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Character_Cards_02.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/ExampleMessages.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Images/Silly_Tavern_preset.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.5/[Alpaca-Context]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.5/[Alpaca-Instruct]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.6/[Alpaca-Context]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.6/[Alpaca-Context]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.6/[Alpaca-Instruct]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.6/[Alpaca-Instruct]Roleplay-v1.6-alt.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.6/[Alpaca-Instruct]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.7/[Alpaca-Context]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.7/[Alpaca-Instruct]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.8/[Alpaca-Context]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.8/[Alpaca-Instruct]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.9/[Alpaca-Context]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Alpaca/v1.9/[Alpaca-Instruct]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.5/[ChatML-Context]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.5/[ChatML-Instruct]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.6/[ChatML-Context]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.6/[ChatML-Context]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.6/[ChatML-Instruct]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Prompts/ChatML/v1.6/[ChatML-Instruct]Roleplay-v1.6-alt.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.6/[ChatML-Instruct]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.7/[ChatML-Context]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.7/[ChatML-Instruct]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.8/[ChatML-Context]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.8/[ChatML-Instruct]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.9/[ChatML-Context]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/ChatML/v1.9/[ChatML-Instruct]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.7/[CommandR-Context]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.7/[CommandR-Instruct]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.8/[Command-R-Context]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.8/[Command-R-Instruct]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.9/[Command-R-Context]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Command-R/v1.9/[Command-R-Instruct]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.5/[LLAMA-3-Context]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.5/[LLAMA-3-Instruct]Roleplay-v1.5.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.6/[LLAMA-3-Context]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.6/[LLAMA-3-Context]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.6/[LLAMA-3-Instruct]Assistant-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.6/[LLAMA-3-Instruct]Roleplay-v1.6-alt.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.6/[LLAMA-3-Instruct]Roleplay-v1.6.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.7/[LLAMA-3-Context]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.7/[LLAMA-3-Instruct]Roleplay-v1.7.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.8/[LLAMA-3-Context]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.8/[LLAMA-3-Instruct]Roleplay-v1.8.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.9/[LLAMA-3-Context]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v1.9/[LLAMA-3-Instruct]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/LLAMA-3/v2.0/LLAMA-3-v2.0.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Prompts/Mistral/v1.9/[Mistral-Context]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='Prompts/Mistral/v1.9/[Mistral-Instruct]Roleplay-v1.9.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Samplers/[Simple]Roleplay.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Samplers/[Test-01]Roleplay.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Samplers/[Test-02]Roleplay.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Samplers/[Test-03]Roleplay.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Scripts/kobold-server.sh', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Themes/Dark-Dawn.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Themes/Visual-Dawn.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-25 19:05:29+00:00"", ""cardData"": ""tags:\n- roleplay"", ""transformersInfo"": null, ""_id"": ""65fd1fb750ca05251d313f4c"", ""modelId"": ""Virt-io/SillyTavern-Presets"", ""usedStorage"": 0}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Virt-io/SillyTavern-Presets&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BVirt-io%2FSillyTavern-Presets%5D(%2FVirt-io%2FSillyTavern-Presets)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
SmolVLM-256M-Instruct_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Starling-LM-7B-alpha_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
berkeley-nest/Starling-LM-7B-alpha,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
datasets:
|
| 5 |
+
- berkeley-nest/Nectar
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
library_name: transformers
|
| 9 |
+
tags:
|
| 10 |
+
- reward model
|
| 11 |
+
- RLHF
|
| 12 |
+
- RLAIF
|
| 13 |
+
---
|
| 14 |
+
# Starling-LM-7B-alpha
|
| 15 |
+
|
| 16 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 17 |
+
|
| 18 |
+
- **Developed by:** Banghua Zhu * , Evan Frick * , Tianhao Wu * , Hanlin Zhu and Jiantao Jiao.
|
| 19 |
+
- **Model type:** Language Model finetuned with RLHF / RLAIF
|
| 20 |
+
- **License:** Apache-2.0 license under the condition that the model is not used to compete with OpenAI
|
| 21 |
+
- **Finetuned from model:** [Openchat 3.5](https://huggingface.co/openchat/openchat_3.5) (based on [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
We introduce Starling-7B, an open large language model (LLM) trained by Reinforcement Learning from AI Feedback (RLAIF). The model harnesses the power of our new GPT-4 labeled ranking dataset, [berkeley-nest/Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar), and our new reward training and policy tuning pipeline. Starling-7B-alpha scores 8.09 in MT Bench with GPT-4 as a judge, outperforming every model to date on MT-Bench except for OpenAI's GPT-4 and GPT-4 Turbo. We release the ranking dataset [Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar), the reward model [Starling-RM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-RM-7B-alpha) and the language model [Starling-LM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha) on HuggingFace, and an online demo in LMSYS [Chatbot Arena](https://chat.lmsys.org). Stay tuned for our forthcoming code and paper, which will provide more details on the whole process.
|
| 26 |
+
|
| 27 |
+
Starling-LM-7B-alpha is a language model trained from [Openchat 3.5](https://huggingface.co/openchat/openchat_3.5) with reward model [berkeley-nest/Starling-RM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-RM-7B-alpha) and policy optimization method [advantage-induced policy alignment (APA)](https://arxiv.org/abs/2306.02231). The evaluation results are listed below.
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
| Model | Tuning Method | MT Bench | AlpacaEval | MMLU |
|
| 31 |
+
|-----------------------|------------------|----------|------------|------|
|
| 32 |
+
| GPT-4-Turbo | ? | 9.32 | 97.70 | |
|
| 33 |
+
| GPT-4 | SFT + PPO | 8.99 | 95.28 | 86.4 |
|
| 34 |
+
| **Starling-7B** | C-RLFT + APA | 8.09 | 91.99 | 63.9 |
|
| 35 |
+
| Claude-2 | ? | 8.06 | 91.36 | 78.5 |
|
| 36 |
+
| GPT-3.5-Turbo | ? | 7.94 | 89.37 | 70 |
|
| 37 |
+
| Claude-1 | ? | 7.9 | 88.39 | 77 |
|
| 38 |
+
| Tulu-2-dpo-70b | SFT + DPO | 7.89 | 95.1 | |
|
| 39 |
+
| Openchat-3.5 | C-RLFT | 7.81 | 88.51 | 64.3 |
|
| 40 |
+
| Zephyr-7B-beta | SFT + DPO | 7.34 | 90.60 | 61.4 |
|
| 41 |
+
| Llama-2-70b-chat-hf | SFT + PPO | 6.86 | 92.66 | 63 |
|
| 42 |
+
| Neural-chat-7b-v3-1 | SFT + DPO | 6.84 | 84.53 | 62.4 |
|
| 43 |
+
| Tulu-2-dpo-7b | SFT + DPO | 6.29 | 85.1 | |
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
For more detailed discussions, please check out our [blog post](https://starling.cs.berkeley.edu), and stay tuned for our upcoming code and paper!
|
| 48 |
+
<!-- Provide the basic links for the model. -->
|
| 49 |
+
|
| 50 |
+
- **Blog:** https://starling.cs.berkeley.edu/
|
| 51 |
+
- **Paper:** Coming soon!
|
| 52 |
+
- **Code:** Coming soon!
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
## Uses
|
| 57 |
+
|
| 58 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 59 |
+
|
| 60 |
+
**Important: Please use the exact chat template provided below for the model; otherwise, performance will degrade. The model output can be verbose in rare cases; consider setting temperature = 0 to make this happen less often.**
|
| 61 |
+
|
| 62 |
+
Our model follows the exact chat template and usage as [Openchat 3.5](https://huggingface.co/openchat/openchat_3.5). Please refer to their model card for more details.
|
| 63 |
+
In addition, our model is hosted on LMSYS [Chatbot Arena](https://chat.lmsys.org) for free testing.
|
| 64 |
+
|
| 65 |
+
The conversation template is the same as Openchat 3.5:
|
| 66 |
+
```
|
| 67 |
+
import transformers
|
| 68 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(""openchat/openchat_3.5"")
|
| 69 |
+
|
| 70 |
+
# Single-turn
|
| 71 |
+
tokens = tokenizer(""GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant:"").input_ids
|
| 72 |
+
assert tokens == [1, 420, 6316, 28781, 3198, 3123, 1247, 28747, 22557, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747]
|
| 73 |
+
|
| 74 |
+
# Multi-turn
|
| 75 |
+
tokens = tokenizer(""GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant: Hi<|end_of_turn|>GPT4 Correct User: How are you today?<|end_of_turn|>GPT4 Correct Assistant:"").input_ids
|
| 76 |
+
assert tokens == [1, 420, 6316, 28781, 3198, 3123, 1247, 28747, 22557, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747, 15359, 32000, 420, 6316, 28781, 3198, 3123, 1247, 28747, 1602, 460, 368, 3154, 28804, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747]
|
| 77 |
+
|
| 78 |
+
# Coding Mode
|
| 79 |
+
tokens = tokenizer(""Code User: Implement quicksort using C++<|end_of_turn|>Code Assistant:"").input_ids
|
| 80 |
+
assert tokens == [1, 7596, 1247, 28747, 26256, 2936, 7653, 1413, 334, 1680, 32000, 7596, 21631, 28747]
|
| 81 |
+
```
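
The same prompts can also be built from the tokenizer's bundled chat template instead of hand-formatted strings. The snippet below is a minimal sketch (not part of the original card) and assumes the template shipped with the tokenizer matches the format above:
```python
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained(""berkeley-nest/Starling-LM-7B-alpha"")

# Build the single-turn prompt from structured messages instead of a raw string.
messages = [{""role"": ""user"", ""content"": ""Hello""}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# The template prepends the BOS token, yielding:
# <s>GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant:
print(prompt)
```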
|
| 82 |
+
## Code Examples
|
| 83 |
+
|
| 84 |
+
```python
|
| 85 |
+
import transformers
|
| 86 |
+
|
| 87 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(""berkeley-nest/Starling-LM-7B-alpha"")
|
| 88 |
+
model = transformers.AutoModelForCausalLM.from_pretrained(""berkeley-nest/Starling-LM-7B-alpha"")
|
| 89 |
+
|
| 90 |
+
def generate_response(prompt):
|
| 91 |
+
input_ids = tokenizer(prompt, return_tensors=""pt"").input_ids
|
| 92 |
+
outputs = model.generate(
|
| 93 |
+
input_ids,
|
| 94 |
+
max_length=256,
|
| 95 |
+
pad_token_id=tokenizer.pad_token_id,
|
| 96 |
+
eos_token_id=tokenizer.eos_token_id,
|
| 97 |
+
)
|
| 98 |
+
response_ids = outputs[0]
|
| 99 |
+
response_text = tokenizer.decode(response_ids, skip_special_tokens=True)
|
| 100 |
+
return response_text
|
| 101 |
+
|
| 102 |
+
# Single-turn conversation
|
| 103 |
+
prompt = ""Hello, how are you?""
|
| 104 |
+
single_turn_prompt = f""GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:""
|
| 105 |
+
response_text = generate_response(single_turn_prompt)
|
| 106 |
+
print(""Response:"", response_text)
|
| 107 |
+
|
| 108 |
+
# Multi-turn conversation
|
| 109 |
+
prompt = ""Hello""
|
| 110 |
+
follow_up_question = ""How are you today?""
|
| 111 |
+
response = """"
|
| 112 |
+
multi_turn_prompt = f""GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant: {response}<|end_of_turn|>GPT4 Correct User: {follow_up_question}<|end_of_turn|>GPT4 Correct Assistant:""
|
| 113 |
+
response_text = generate_response(multi_turn_prompt)
|
| 114 |
+
print(""Multi-turn conversation response:"", response_text)
|
| 115 |
+
|
| 116 |
+
# Coding conversation
|
| 117 |
+
prompt = ""Implement quicksort using C++""
|
| 118 |
+
coding_prompt = f""Code User: {prompt}<|end_of_turn|>Code Assistant:""
|
| 119 |
+
response = generate_response(coding_prompt)
|
| 120 |
+
print(""Coding conversation response:"", response)
|
| 121 |
+
```
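
Note that `generate_response` above decodes the whole output sequence, so the prompt is echoed at the start of the returned text. A small variation (a sketch, not from the original card) that decodes only the newly generated tokens:
```python
def generate_response_only(prompt):
    input_ids = tokenizer(prompt, return_tensors=""pt"").input_ids
    outputs = model.generate(
        input_ids,
        max_new_tokens=256,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt tokens and decode only the completion.
    new_tokens = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```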
|
| 122 |
+
|
| 123 |
+
## License
|
| 124 |
+
The dataset, model and online demo are a research preview intended for non-commercial use only, subject to the data distillation [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
## Acknowledgment
|
| 128 |
+
We would like to thank Wei-Lin Chiang from Berkeley for detailed feedback on the blog and the projects. We would like to thank the [LMSYS Organization](https://lmsys.org/) for their support of the [lmsys-chat-1M](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) dataset, evaluation and online demo. We would like to thank the open source community for their efforts in providing the datasets and base models we used to develop the project, including but not limited to Anthropic, Llama, Mistral, Hugging Face H4, LMSYS, OpenChat, OpenBMB, Flan and ShareGPT.
|
| 129 |
+
|
| 130 |
+
## Citation
|
| 131 |
+
```
|
| 132 |
+
@misc{starling2023,
|
| 133 |
+
title = {Starling-7B: Improving LLM Helpfulness & Harmlessness with RLAIF},
|
| 134 |
+
url = {},
|
| 135 |
+
author = {Zhu, Banghua and Frick, Evan and Wu, Tianhao and Zhu, Hanlin and Jiao, Jiantao},
|
| 136 |
+
month = {November},
|
| 137 |
+
year = {2023}
|
| 138 |
+
}
|
| 139 |
+
```","{""id"": ""berkeley-nest/Starling-LM-7B-alpha"", ""author"": ""berkeley-nest"", ""sha"": ""1dddf3b95bc1391f6307299eb1c162c194bde9bd"", ""last_modified"": ""2024-03-20 04:19:58+00:00"", ""created_at"": ""2023-11-25 17:42:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9797, ""downloads_all_time"": null, ""likes"": 556, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""reward model"", ""RLHF"", ""RLAIF"", ""conversational"", ""en"", ""dataset:berkeley-nest/Nectar"", ""arxiv:2306.02231"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- berkeley-nest/Nectar\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- reward model\n- RLHF\n- RLAIF"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"", ""eos_token"": ""<|end_of_turn|>"", ""pad_token"": ""<|end_of_turn|>"", ""sep_token"": ""<sep>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openchat.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""ZhangYuhan/3DGen-Arena"", ""open-llm-leaderboard-old/open_llm_leaderboard"", ""speakleash/open_pl_llm_leaderboard"", ""KBaba7/Quant"", ""meval/multilingual-chatbot-arena-leaderboard"", ""prometheus-eval/BiGGen-Bench-Leaderboard"", ""ali-vilab/IDEA-Bench-Arena"", 
""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""Modelexdl/GPT-SPACE-v1"", ""limcheekin/Starling-LM-7B-alpha-GGUF"", ""tianleliphoebe/visual-arena"", ""Ashmal/MobiLlama"", ""ruslanmv/convert_to_gguf"", ""Tonic1/Starling"", ""PeepDaSlan9/berkeley-nest-Starling-LM-7B-alpha"", ""mikx1/berkeley-nest-Starling-LM-7B-alpha"", ""Prezily/berkeley-nest-Starling-LM-7B-alpha"", ""legolasyiu/berkeley-nest-Starling-LM-7B-alpha"", ""EREREFE/berkeley-nest-Starling-LM-7B-alpha"", ""UltraMarkoRJ/berkeley-nest-Starling-LM-7B-alpha"", ""sclepios/berkeley-nest-Starling-LM-7B-alpha"", ""ATajuddin/berkeley-nest-Starling-LM-7B-alpha"", ""cybtek/berkeley-nest-Starling-LM-7B-alpha"", ""Bellamy66/berkeley-nest-Starling-LM-7B-alpha"", ""ronniepies/berkeley-nest-Starling-LM-7B-alpha"", ""iuroneko/berkeley-nest-Starling-LM-7B-alpha"", ""iblfe/test"", ""dbasu/multilingual-chatbot-arena-leaderboard"", ""artteam8/new_artteam8"", ""smothiki/open_llm_leaderboard_old"", ""Alignment-Lab-AI/orcaleaderboard"", ""tomarni/open_pl_llm_leaderboard"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 7241748480}, ""total"": 7241748480}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-20 04:19:58+00:00"", ""cardData"": ""datasets:\n- berkeley-nest/Nectar\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- reward model\n- RLHF\n- RLAIF"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""656231f7ed2bca599ead7d1f"", ""modelId"": ""berkeley-nest/Starling-LM-7B-alpha"", ""usedStorage"": 14484024227}",0,"https://huggingface.co/derek33125/project_angel_llama3_v1, https://huggingface.co/IsakNordgren/Starling-LM-7B-alpha-x2, https://huggingface.co/IsakNordgren/mistral-Summarizer-7b-instruct-v0.2-x-Starling-LM-7B-alpha, https://huggingface.co/IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha, https://huggingface.co/numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever",5,"https://huggingface.co/binhquoc/llama-zalo, https://huggingface.co/AswanthCManoj/azma-starling-7B-alpha-lora-adapter, https://huggingface.co/1-lock/037e774b-eada-4b03-b7bb-34fb6b166eaa, https://huggingface.co/VERSIL91/037e774b-eada-4b03-b7bb-34fb6b166eaa, https://huggingface.co/eeeebbb2/037e774b-eada-4b03-b7bb-34fb6b166eaa, https://huggingface.co/DeepDream2045/090c305e-f6b0-4f40-a8e6-396f0bf24bf3, https://huggingface.co/1-lock/090c305e-f6b0-4f40-a8e6-396f0bf24bf3, https://huggingface.co/dada22231/090c305e-f6b0-4f40-a8e6-396f0bf24bf3, https://huggingface.co/eeeebbb2/090c305e-f6b0-4f40-a8e6-396f0bf24bf3, https://huggingface.co/1-lock/3070ad16-bfaf-4809-bcfd-56bd1e38e058, https://huggingface.co/eeeebbb2/3070ad16-bfaf-4809-bcfd-56bd1e38e058, https://huggingface.co/dada22231/3070ad16-bfaf-4809-bcfd-56bd1e38e058, https://huggingface.co/VERSIL91/3070ad16-bfaf-4809-bcfd-56bd1e38e058, https://huggingface.co/eeeebbb2/7a36ff8a-c1c0-434c-9201-8a76a8be7d60, https://huggingface.co/DeepDream2045/7a36ff8a-c1c0-434c-9201-8a76a8be7d60, https://huggingface.co/vdos/7a36ff8a-c1c0-434c-9201-8a76a8be7d60, https://huggingface.co/dada22231/7a36ff8a-c1c0-434c-9201-8a76a8be7d60, https://huggingface.co/cunghoctienganh/b26876a9-f446-4de6-9dce-2b4cba2ee973, https://huggingface.co/nhung02/c1f5c177-d7b5-46f3-8b2c-97ac786a82cc, https://huggingface.co/VERSIL91/0eaa2168-d144-4655-b824-847b031556c1, 
https://huggingface.co/kokovova/27f7a040-892c-49b9-90e8-0812c8507956, https://huggingface.co/laquythang/0bb9e871-e3d4-4da1-8f96-125c9bb89150, https://huggingface.co/bbytxt/6086b5d7-0454-4917-ae8d-29a651287492, https://huggingface.co/adammandic87/59a245c1-59a1-4a56-b27a-9297c2bb5218, https://huggingface.co/kk-aivio/da61dfec-5219-4a96-8414-0fc9197ae986, https://huggingface.co/nbninh/dc91943c-fd1d-471d-a853-7b1d6459a7aa, https://huggingface.co/nhung02/2c0e0e84-2e9d-46b8-a38b-9f3a6bd90f7d, https://huggingface.co/laquythang/190e8afa-2455-450b-b930-c889b417b98f, https://huggingface.co/vmpsergio/6456f004-ef13-4d96-8bae-1b07507a61c1, https://huggingface.co/lesso04/bb5d0e76-4ff8-46ac-a7d3-08ba460717fc, https://huggingface.co/nhung02/f4bba666-252a-4aa8-b39d-30e24dd52e3a, https://huggingface.co/laquythang/4386a149-7df5-4757-b18e-9de2134256c8, https://huggingface.co/nttx/4e2a8b20-6a00-4df4-98ed-d5b5ad98f43d, https://huggingface.co/prxy5604/9a6f73f0-681a-45be-b029-1dadcb7d697f, https://huggingface.co/tarabukinivan/0235dab0-43ef-4643-b289-3461a1ac7731, https://huggingface.co/filipesantoscv11/b12dd762-6620-43a8-a94c-a7409e09b375, https://huggingface.co/ClarenceDan/2024307d-3bbd-4781-8aa2-ca214a2d3e73, https://huggingface.co/havinash-ai/53403997-7a62-4436-8e4c-f2789c5d0306, https://huggingface.co/tarabukinivan/41183680-b833-4481-b299-f3099117a326, https://huggingface.co/nadejdatarabukina/d6fa3fde-33b3-460c-bb01-23f0e37fd630, https://huggingface.co/lesso12/7a56fd17-a74a-45e5-b74a-56845879af12, https://huggingface.co/0x1202/6fe93d12-40f8-418b-9262-220a6ab8c6e2, https://huggingface.co/demohong/ef1aadb0-6a1e-4f2a-b272-a23aeeca913e, https://huggingface.co/nhung01/ae2216d6-1954-459e-bcf9-c9693b590b52, https://huggingface.co/dimasik1987/d9f9e5e8-9f14-4b88-9ec8-109ab16d1381, https://huggingface.co/lesso10/621ced06-fedf-49f1-8f12-ab07a6168442, https://huggingface.co/mrHunghddddd/029f2bb6-91af-4328-83a2-885f91debea0, https://huggingface.co/cunghoctienganh/04f879c3-c50a-4c3d-9c84-2abc67f48d96, https://huggingface.co/fedovtt/d390fb5e-445c-46a2-90b9-0fccfeb15916, https://huggingface.co/nhung01/1f13dc67-1129-4916-a537-ab5d31757f94, https://huggingface.co/aleegis09/938fea68-f46e-4c78-b395-584a2e7d3152, https://huggingface.co/ClarenceDan/0340a4e3-1d35-432d-a0bf-a39599d65ed6, https://huggingface.co/mrHunghddddd/fced0531-07cb-4cc8-b98d-40650cdfc3e7, https://huggingface.co/lesso07/8e9a2c5f-01c3-40ad-a000-f954338b15fa, https://huggingface.co/nbninh/78b1dc2e-74ba-4cdc-8f63-03f47aeaacfb, https://huggingface.co/lesso13/7c7db22c-80fd-4b76-9002-90693f306bb6, https://huggingface.co/dimasik2987/2e78e81c-af11-4967-ba7a-61d357ad241e, https://huggingface.co/prxy5605/e1280ecd-9c7e-4957-bfd5-f92bce373656, https://huggingface.co/aleegis09/b4f71353-2bbf-41a0-931a-b7cf7feaa75d, https://huggingface.co/prxy5606/873c0786-6cfe-4281-b437-0ee9202dd843, https://huggingface.co/nbninh/c1a0c59e-104f-4d0c-be12-e702e36159dc, https://huggingface.co/hongngo/d2f87bad-a094-484c-8a6b-59ceb8ab6380, https://huggingface.co/nhoxinh/e56e1078-778f-4dbf-8196-c820aedb8296, https://huggingface.co/nhungphammmmm/e9328363-444d-4b3c-be80-1e25a6c4971f, https://huggingface.co/aleegis09/eb76bd17-4603-4a6d-8b70-c3be16483cea, https://huggingface.co/fedovtt/bbba05a6-7776-43e3-978c-6056c0c99ef3, https://huggingface.co/cvoffer/1f74b535-9022-4cf7-adc4-1d0b1247f4e6, https://huggingface.co/nhung02/91882152-c1db-4fd0-9edf-3085a5bc4d9b, https://huggingface.co/adammandic87/67188f3a-f5c1-4333-a9da-efb62faf42e1, https://huggingface.co/kk-aivio/3294bce4-301c-4a12-9d25-e3d1676376ef, 
https://huggingface.co/nbninh/ae3efffd-8fb1-47a9-8692-c374685cd3c9, https://huggingface.co/minhtrannnn/e9d642d2-b1fa-4a85-b229-63269c8f15e9, https://huggingface.co/nhung02/a6610d0b-3f2e-44aa-8d58-c4bfd1219104, https://huggingface.co/demohong/13f4d6a5-49d8-42d8-9613-723336b9c70b, https://huggingface.co/lesso10/e063116c-13a7-4b11-b027-2340a96b0b1a, https://huggingface.co/nhung01/4808e975-ff38-445e-9f1d-3282953790cf, https://huggingface.co/dimasik87/3975ffda-01ce-4f0d-aac0-786e9dfda773, https://huggingface.co/laquythang/8eb84646-c60d-4051-938d-1a567e81b867, https://huggingface.co/lesso15/1f65c0ae-b94c-43ac-92f1-df91838f74ef, https://huggingface.co/nhunglaaaaaaa/44f2c2bb-7da1-4343-af85-d9c32387f496, https://huggingface.co/denbeo/76c7e0d6-94d4-46be-8891-4ec034cef98a, https://huggingface.co/thakkkkkk/295de1f9-4b9e-4e87-963f-722cc0822639, https://huggingface.co/error577/3ec1494b-ab51-4eb5-b427-601f223c7452, https://huggingface.co/trangtrannnnn/2e14534d-c86e-4d01-8c77-9b80e97defca, https://huggingface.co/datlaaaaaaa/daffcb00-0a04-42f6-b392-548b684b51a2, https://huggingface.co/nhungphammmmm/0dc45672-24d6-42f3-820d-cab25d4e0a85, https://huggingface.co/thangla01/ea06d24b-8095-45bc-8294-b49a6a74aff6, https://huggingface.co/thaffggg/3c5c9dc8-a188-4e05-828e-a168d771d4ee, https://huggingface.co/prxy5605/038521a6-6f72-473e-b7df-8ce7118eb24b, https://huggingface.co/prxy5607/798b618b-6eff-4fa4-9198-a587134e41cb, https://huggingface.co/nhungphammmmm/adfc527d-a5eb-4720-b2f1-4d101a9f2d69, https://huggingface.co/lesso06/7df024bf-2cf6-40d1-992e-fdac5360a3ee, https://huggingface.co/thalllsssss/c971168d-fbdc-4890-9ca6-7ed2bd1c2757, https://huggingface.co/nhung01/2db76c83-8fce-4e8d-8481-97047df71f7d, https://huggingface.co/dimasik87/1c70a458-2d1c-4aa6-924e-dd13aeefd833, https://huggingface.co/lhong4759/fcfdb02e-1f99-4a73-8feb-9b3d9aba5852, https://huggingface.co/cunghoctienganh/dc35737d-7e71-465e-b4f3-0654c75e550e, https://huggingface.co/adammandic87/7bb8c236-c974-44af-afb3-33bf92f1b185, https://huggingface.co/great0001/f794df60-1dae-4bb9-a5c3-0d80275b490d, https://huggingface.co/denbeo/53bdccc3-f632-4bb9-b2fd-56d8904329ee, https://huggingface.co/nbninh/6b9f33c0-9006-447a-9f29-d28475cb5542, https://huggingface.co/nhoxinh/ce2567ec-dc68-4a78-9bf5-951f5c1159b4, https://huggingface.co/JacksonBrune/9dbc50d2-0a79-4801-b580-7be8fe9a2b2e, https://huggingface.co/minhnguyennnnnn/329ce0e1-c24c-4b6f-8e37-758928e7e6be, https://huggingface.co/laquythang/7f4d8fd5-3433-4c3c-8aef-7ef53b51fe1e, https://huggingface.co/Best000/3290707f-073d-46d0-bc44-0e03ac4916c0, https://huggingface.co/aleegis09/7a25e646-681f-48eb-afde-acdf37d491a9, https://huggingface.co/prxy5606/c4e1d135-9488-43d6-8c8b-d8c31b04c485, https://huggingface.co/aleegis10/b9b66d40-9786-45c6-acf6-190d8461dc68, https://huggingface.co/bbytxt/24168a91-7cdf-4f3e-a9fb-8388fa5b0814, https://huggingface.co/cunghoctienganh/a3f1d3f3-0422-41bc-8014-1a7000a20f88, https://huggingface.co/nhungphammmmm/223e2da2-48fe-42dc-b9f9-8fb012228a77, https://huggingface.co/mrHungddddh/3b47e932-6386-4e1b-ac60-f8c521d7e678, https://huggingface.co/minhtrannnn/04b3b496-70cc-4463-a6b7-67be6cf4a0dc, https://huggingface.co/robiulawaldev/86b04a82-f2c6-440b-9c19-ff6ba1029196, https://huggingface.co/nhoxinh/4d3bb3d3-1be4-4e62-8830-8e33bdafca21, https://huggingface.co/nhunglaaaaaaa/521591eb-832b-4e58-b9d9-f2dc47ae34b1, https://huggingface.co/minhnguyennnnnn/ed20b72c-ac29-4b9b-9771-68c670a37d00, https://huggingface.co/aleegis12/682293be-e7e6-4da6-8c18-92844cb516a4, 
https://huggingface.co/mrHunghddddd/ffd6e931-9d61-4eb4-be68-b10aedadea25, https://huggingface.co/hongngo/3878b55b-df4b-4456-8dcb-2266ff75306f, https://huggingface.co/nhung01/739d7864-dea3-436f-bb8e-8a119fdde633, https://huggingface.co/JacksonBrune/ab5da776-ee8d-4412-92c4-ed3184ce6ffb, https://huggingface.co/nbninh/6948c97b-8374-4338-ab24-8e976c679ef6, https://huggingface.co/daniel40/7e07fc5f-512e-49ac-9b48-42677781af25, https://huggingface.co/lesso01/f6c2b613-3b40-4dc1-8332-b21dbc57874f, https://huggingface.co/prxy5604/d5c05fa6-256f-46a6-9cd4-badf40a2a96c, https://huggingface.co/nhunglaaaaaaa/cfeaef8d-93b7-4120-ae6b-ea63452b1012, https://huggingface.co/lesso/f36a75e1-e2c0-4986-a5fd-166ac5dcd84a, https://huggingface.co/cimol/6f5c2ef9-4289-4f74-98c6-b60e2ad4d500, https://huggingface.co/alchemist69/5a86435b-6edf-454c-8145-b574cd36f7b1, https://huggingface.co/lesso/c87449c9-7df3-4f33-8cae-65e0cba9568c, https://huggingface.co/havinash-ai/8fe78e1d-a219-4f6a-ae27-440bd346144a, https://huggingface.co/adammandic87/416d314e-7ff0-4690-87e7-4017b64caf96, https://huggingface.co/alchemist69/6a3fbf96-5c34-4f81-bd7f-b7af82d8742f, https://huggingface.co/lesso03/0738fa22-7f57-4ace-843b-90f3c1c174b1, https://huggingface.co/lesso11/cfeabc97-13d5-44df-a46f-65beebccb6ff, https://huggingface.co/arcwarden46/1d9f84b4-ce3d-41cd-8a49-c7e4cabe0c08, https://huggingface.co/lesso03/3955be48-42b0-412f-b21f-fae0c390251e, https://huggingface.co/lesso12/c11d467e-2021-4d7c-b0c0-e9c2cde2df6a, https://huggingface.co/havinash-ai/922d8640-c2c6-49d5-9758-43e25b26a4f7, https://huggingface.co/nttx/6a03cbfb-f508-48da-82ce-1fec35e9804b, https://huggingface.co/lesso04/e876f0d0-9d7e-4ffa-89ce-30bb909b596a, https://huggingface.co/alchemist69/a8e2ac5d-a4cc-40fa-9439-fdecede1d38f, https://huggingface.co/nttx/68c62a46-5048-4042-89d7-0f7973f18a0b, https://huggingface.co/abaddon182/80a87a9d-a02d-42b7-a629-6338b4718a61, https://huggingface.co/lesso09/28df7c10-f064-4aa6-bb40-b9cbf9f1df83, https://huggingface.co/lesso03/c6408b72-cd12-4b07-b8eb-fe88f8a9f545, https://huggingface.co/shibajustfor/e27a8885-067d-4e13-9f2a-6d36125c2121, https://huggingface.co/robiual-awal/8fad469a-ee16-4d7f-8f4e-ba4f403ab956, https://huggingface.co/lesso15/1db45ad1-5e6b-4efb-82ea-c6472deb0bef, https://huggingface.co/lesso13/4db96eb9-ea0a-47e6-912b-cf2d9e95c539, https://huggingface.co/lesso08/0094c475-ad6e-4fb3-8b5d-150801aca58b, https://huggingface.co/baby-dev/63bf4f25-82f1-41c6-9ae3-fad13c2377ee, https://huggingface.co/JacksonBrune/2afc36b2-3794-44a2-81b1-779af47f9bf7, https://huggingface.co/lesso07/e033f400-be0f-4c48-a498-6d6532594dcb, https://huggingface.co/arcwarden46/7f00acf5-de34-4755-8b8a-cb31b019fa21, https://huggingface.co/lesso09/c29cf3b2-08ca-4aff-9098-d24e889800f0, https://huggingface.co/lesso06/40e185cc-307a-4329-9305-34a82029d65b, https://huggingface.co/lesso03/cb57b432-9963-46fa-a2be-7fae7bef3e19, https://huggingface.co/lesso16/cbcbb39c-0a87-41a9-ab4f-0206984e017e, https://huggingface.co/daniel40/5eba98f8-ba80-4dd7-8c02-0e74c1d09379, https://huggingface.co/trenden/1b40d269-e6b1-43e2-ae7c-476598230950, https://huggingface.co/abaddon182/377e422d-0527-4d86-9591-10d62733c412, https://huggingface.co/lesso15/e8ccd1a4-10ae-4578-a1d2-29378c34fae2, https://huggingface.co/lesso01/f0c2cf3f-c681-43ea-a91b-d81a3f1803a2, https://huggingface.co/lesso07/378aca03-5fd8-4f40-aa62-72f1ce176632, https://huggingface.co/lesso04/b2af0b8d-efd2-4b08-8564-595d98f81c92, https://huggingface.co/lesso05/983bff48-6313-48c1-b173-55f6eff3c27b, 
https://huggingface.co/lesso13/e7f95029-c40b-46d7-82e8-20edbde58ed2, https://huggingface.co/error577/ca7eabf0-8d69-4a3d-8fa4-e5088fbe30df, https://huggingface.co/lesso18/6fcf5dd3-e442-4cc1-88d6-b531d7752c7b, https://huggingface.co/lesso16/4022c8e7-a8ae-48a0-a049-cf54fecb3f96, https://huggingface.co/lesso04/a71a8867-c339-4519-b68c-3f41a050aa93, https://huggingface.co/error577/18544a08-dcda-4e0d-bbdd-6d81ab527732, https://huggingface.co/lesso13/45938976-a7a7-4102-9598-a6d88b3e5dbc, https://huggingface.co/lesso18/da63cfa5-dc3d-439d-9de5-a57976691f7d, https://huggingface.co/lesso14/b1d893dc-7882-461d-b117-4d8f35b2ddb5, https://huggingface.co/lesso07/0c94e6f0-7adc-4693-8349-7a3b55eee7db, https://huggingface.co/ClarenceDan/cb1c1d46-9ca4-4c3a-8e7a-8c4384b6c83a, https://huggingface.co/lesso12/a380ebfb-7d55-4b74-a97a-aba25e19aff9, https://huggingface.co/lesso11/ffd5b652-0091-4250-bb3f-bee126b567dd, https://huggingface.co/lesso14/6fb17d14-b0be-4f35-b326-49bd396c0539, https://huggingface.co/lesso07/956a314f-f1f8-41bf-ab90-e7798ebdd60f, https://huggingface.co/fats-fme/e1609f68-2cbd-4025-aa4b-04ba87c40249, https://huggingface.co/lesso13/a8633a04-2b5d-485d-acde-3680038f6801, https://huggingface.co/lesso11/605f6d49-195a-4d68-9a96-a2fcb4a374b4, https://huggingface.co/lesso03/75d4e5aa-7a83-45da-9762-be0bfe6a8d2a, https://huggingface.co/lesso15/5f602ef4-4dee-4515-9956-53f0302e44be, https://huggingface.co/ClarenceDan/4e92b50b-db37-45a8-8f4f-da99829fc7e4, https://huggingface.co/lesso04/81cb318d-c91b-4118-9a08-f02669fb1c5a, https://huggingface.co/lesso02/617fc783-f16c-44ab-9bb9-b77ede4ebcdd, https://huggingface.co/lesso12/54e4f24a-4846-442b-911c-7098dcaaab09, https://huggingface.co/lesso13/cff8751f-e510-4bb1-ad36-d27f4e96d682, https://huggingface.co/error577/17d3d1b8-f7ef-4f41-b328-b1b33bcd752b, https://huggingface.co/lesso05/a7a67892-3462-42d0-a383-7cd0d16823c5, https://huggingface.co/lesso11/17988339-5ec9-4470-9991-18aa8f1a2b54, https://huggingface.co/aleegis/c24a801a-dfe4-427b-acfb-7909f3383e42, https://huggingface.co/vmpsergio/d0f262ed-492a-491d-8683-246ac4f197f8, https://huggingface.co/sergioalves/3db41028-36c3-44b3-a029-efaaad0caa4c, https://huggingface.co/dzanbek/d45d9ba8-2537-4af4-b662-b98bd7cbfab8, https://huggingface.co/baby-dev/ac0ae79e-1b8b-4280-87ca-622aa91da638",202,"https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF, https://huggingface.co/TheBloke/Starling-LM-7B-alpha-AWQ, https://huggingface.co/second-state/Starling-LM-7B-alpha-GGUF, https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GPTQ, https://huggingface.co/gizmo-ai/Starling-LM-7B-alpha-AWQ, https://huggingface.co/MaziyarPanahi/Starling-LM-7B-alpha-GGUF, https://huggingface.co/QuantFactory/Starling-LM-7B-alpha-GGUF, https://huggingface.co/mradermacher/Starling-LM-7B-alpha-GGUF, https://huggingface.co/tensorblock/Starling-LM-7B-alpha-GGUF, https://huggingface.co/llmware/starling-lm-7b-alpha-gguf, https://huggingface.co/PrunaAI/berkeley-nest-Starling-LM-7B-alpha-GGUF-smashed",11,"https://huggingface.co/shahzebnaveed/StarlingHermes-2.5-Mistral-7B-slerp, https://huggingface.co/mayacinka/Open-StaMis-v02-stock, https://huggingface.co/Q-bert/MetaMath-Cybertron-Starling, https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0, https://huggingface.co/Praneeth/StarMix-7B-slerp, https://huggingface.co/luqmanxyz/LelaStarling-7B, https://huggingface.co/Gille/StrangeMerges_7-7B-slerp, https://huggingface.co/BioMistral/BioMistral-7B-Starling-SLERP, https://huggingface.co/kidyu/Moza-7B-v1.0, 
https://huggingface.co/giraffe176/Starling_Monarch_Westlake_Garten-7B-v0.1, https://huggingface.co/Aryanne/Open-StarLake-Swap-7B, https://huggingface.co/bartowski/Starling_Monarch_Westlake_Garten-7B-v0.1-GGUF, https://huggingface.co/bartowski/Starling_Monarch_Westlake_Garten-7B-v0.1-exl2, https://huggingface.co/nlpguy/StarFusion-alpha1, https://huggingface.co/nlpguy/StarFusion-alpha2, https://huggingface.co/nlpguy/StarFusion-alpha1-Q4_K_M-GGUF, https://huggingface.co/nlpguy/StarFusion-alpha2-Q4_K_M-GGUF",17,"Ashmal/MobiLlama, FallnAI/Quantize-HF-Models, KBaba7/Quant, PeepDaSlan9/berkeley-nest-Starling-LM-7B-alpha, ZhangYuhan/3DGen-Arena, ali-vilab/IDEA-Bench-Arena, bhaskartripathi/LLM_Quantization, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=berkeley-nest/Starling-LM-7B-alpha&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bberkeley-nest%2FStarling-LM-7B-alpha%5D(%2Fberkeley-nest%2FStarling-LM-7B-alpha)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, meval/multilingual-chatbot-arena-leaderboard, open-llm-leaderboard-old/open_llm_leaderboard, prometheus-eval/BiGGen-Bench-Leaderboard, speakleash/open_pl_llm_leaderboard",13
|
| 140 |
+
derek33125/project_angel_llama3_v1,"---
|
| 141 |
+
language:
|
| 142 |
+
- en
|
| 143 |
+
license: apache-2.0
|
| 144 |
+
tags:
|
| 145 |
+
- text-generation-inference
|
| 146 |
+
- transformers
|
| 147 |
+
- unsloth
|
| 148 |
+
- mistral
|
| 149 |
+
- trl
|
| 150 |
+
base_model: berkeley-nest/Starling-LM-7B-alpha
|
| 151 |
+
---
|
| 152 |
+
|
| 153 |
+
# Uploaded model
|
| 154 |
+
|
| 155 |
+
- **Developed by:** derek33125
|
| 156 |
+
- **License:** apache-2.0
|
| 157 |
+
- **Finetuned from model:** berkeley-nest/Starling-LM-7B-alpha
|
| 158 |
+
|
| 159 |
+
This Mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.
|
| 160 |
+
|
| 161 |
+
[<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
|
| 162 |
+
","{""id"": ""derek33125/project_angel_llama3_v1"", ""author"": ""derek33125"", ""sha"": ""075112a79d8c6526d70bb732b953cec4c35b27de"", ""last_modified"": ""2024-05-19 05:31:30+00:00"", ""created_at"": ""2024-05-19 05:31:15+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""mistral"", ""trl"", ""en"", ""base_model:berkeley-nest/Starling-LM-7B-alpha"", ""base_model:finetune:berkeley-nest/Starling-LM-7B-alpha"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: berkeley-nest/Starling-LM-7B-alpha\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- mistral\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"", ""cls_token"": null, ""eos_token"": ""<|end_of_turn|>"", ""mask_token"": null, ""pad_token"": ""<|end_of_turn|>"", ""sep_token"": null, ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-19 05:31:30+00:00"", ""cardData"": ""base_model: berkeley-nest/Starling-LM-7B-alpha\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- mistral\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66498ea30e5395fb526d7f55"", ""modelId"": ""derek33125/project_angel_llama3_v1"", ""usedStorage"": 168325683}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=derek33125/project_angel_llama3_v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bderek33125%2Fproject_angel_llama3_v1%5D(%2Fderek33125%2Fproject_angel_llama3_v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 163 |
+
IsakNordgren/Starling-LM-7B-alpha-x2,"---
|
| 164 |
+
base_model:
|
| 165 |
+
- berkeley-nest/Starling-LM-7B-alpha
|
| 166 |
+
tags:
|
| 167 |
+
- merge
|
| 168 |
+
- mergekit
|
| 169 |
+
- lazymergekit
|
| 170 |
+
- berkeley-nest/Starling-LM-7B-alpha
|
| 171 |
+
---
|
| 172 |
+
|
| 173 |
+
# Starling-LM-7B-alpha-x2
|
| 174 |
+
|
| 175 |
+
Starling-LM-7B-alpha-x2 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
|
| 176 |
+
* [berkeley-nest/Starling-LM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha)
|
| 177 |
+
|
| 178 |
+
## 🧩 Configuration
|
| 179 |
+
|
| 180 |
+
```yaml
|
| 181 |
+
models:
|
| 182 |
+
- model: berkeley-nest/Starling-LM-7B-alpha
|
| 183 |
+
# No parameters necessary for base model
|
| 184 |
+
- model: berkeley-nest/Starling-LM-7B-alpha
|
| 185 |
+
parameters:
|
| 186 |
+
density: 0.53
|
| 187 |
+
weight: 0.6
|
| 188 |
+
merge_method: dare_ties
|
| 189 |
+
base_model: berkeley-nest/Starling-LM-7B-alpha
|
| 190 |
+
parameters:
|
| 191 |
+
int8_mask: true
|
| 192 |
+
dtype: bfloat16
|
| 193 |
+
```
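
To reproduce a merge like this locally, the configuration can be saved to a YAML file and passed to mergekit's command-line entry point. The snippet below is a rough sketch (it assumes mergekit is installed; the config filename and output directory are illustrative):
```python
import subprocess

# Run the mergekit CLI on the configuration above, saved as config.yaml.
# The output directory name is an arbitrary choice for this example.
subprocess.run([""mergekit-yaml"", ""config.yaml"", ""./Starling-LM-7B-alpha-x2""], check=True)
```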
|
| 194 |
+
|
| 195 |
+
## 💻 Usage
|
| 196 |
+
|
| 197 |
+
```python
|
| 198 |
+
!pip install -qU transformers accelerate
|
| 199 |
+
|
| 200 |
+
from transformers import AutoTokenizer
|
| 201 |
+
import transformers
|
| 202 |
+
import torch
|
| 203 |
+
|
| 204 |
+
model = ""IsakNordgren/Starling-LM-7B-alpha-x2""
|
| 205 |
+
messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
|
| 206 |
+
|
| 207 |
+
tokenizer = AutoTokenizer.from_pretrained(model)
|
| 208 |
+
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 209 |
+
pipeline = transformers.pipeline(
|
| 210 |
+
""text-generation"",
|
| 211 |
+
model=model,
|
| 212 |
+
torch_dtype=torch.float16,
|
| 213 |
+
device_map=""auto"",
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
| 217 |
+
print(outputs[0][""generated_text""])
|
| 218 |
+
```","{""id"": ""IsakNordgren/Starling-LM-7B-alpha-x2"", ""author"": ""IsakNordgren"", ""sha"": ""f4a438c93f4ef6bcf6d7bd4ded529a9f362c8fa6"", ""last_modified"": ""2024-07-05 10:47:33+00:00"", ""created_at"": ""2024-07-05 10:43:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""berkeley-nest/Starling-LM-7B-alpha"", ""conversational"", ""base_model:berkeley-nest/Starling-LM-7B-alpha"", ""base_model:finetune:berkeley-nest/Starling-LM-7B-alpha"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- berkeley-nest/Starling-LM-7B-alpha\ntags:\n- merge\n- mergekit\n- lazymergekit\n- berkeley-nest/Starling-LM-7B-alpha"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"", ""eos_token"": ""<|end_of_turn|>"", ""pad_token"": ""<|end_of_turn|>"", ""sep_token"": ""<sep>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7241748480}, ""total"": 7241748480}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-05 10:47:33+00:00"", ""cardData"": ""base_model:\n- berkeley-nest/Starling-LM-7B-alpha\ntags:\n- merge\n- mergekit\n- lazymergekit\n- berkeley-nest/Starling-LM-7B-alpha"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6687ce589c63612f86e5d95c"", ""modelId"": ""IsakNordgren/Starling-LM-7B-alpha-x2"", ""usedStorage"": 14484024171}",1,,0,,0,"https://huggingface.co/mradermacher/Starling-LM-7B-alpha-x2-GGUF, https://huggingface.co/mradermacher/Starling-LM-7B-alpha-x2-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=IsakNordgren/Starling-LM-7B-alpha-x2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIsakNordgren%2FStarling-LM-7B-alpha-x2%5D(%2FIsakNordgren%2FStarling-LM-7B-alpha-x2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 219 |
+
https://huggingface.co/IsakNordgren/mistral-Summarizer-7b-instruct-v0.2-x-Starling-LM-7B-alpha,N/A,N/A,1,,0,,0,,0,,0,,0
|
| 220 |
+
IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha,"---
|
| 221 |
+
base_model:
|
| 222 |
+
- berkeley-nest/Starling-LM-7B-alpha
|
| 223 |
+
tags:
|
| 224 |
+
- merge
|
| 225 |
+
- mergekit
|
| 226 |
+
- lazymergekit
|
| 227 |
+
- berkeley-nest/Starling-LM-7B-alpha
|
| 228 |
+
---
|
| 229 |
+
|
| 230 |
+
# Starling-LM-7B-beta-x-Starling-LM-7B-alpha
|
| 231 |
+
|
| 232 |
+
Starling-LM-7B-beta-x-Starling-LM-7B-alpha is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
|
| 233 |
+
* [berkeley-nest/Starling-LM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha)
|
| 234 |
+
|
| 235 |
+
## 🧩 Configuration
|
| 236 |
+
|
| 237 |
+
```yaml
|
| 238 |
+
models:
|
| 239 |
+
- model: Nexusflow/Starling-LM-7B-beta
|
| 240 |
+
# No parameters necessary for base model
|
| 241 |
+
- model: berkeley-nest/Starling-LM-7B-alpha
|
| 242 |
+
parameters:
|
| 243 |
+
density: 0.53
|
| 244 |
+
weight: 0.6
|
| 245 |
+
merge_method: dare_ties
|
| 246 |
+
base_model: Nexusflow/Starling-LM-7B-beta
|
| 247 |
+
parameters:
|
| 248 |
+
int8_mask: true
|
| 249 |
+
dtype: bfloat16
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
## 💻 Usage
|
| 253 |
+
|
| 254 |
+
```python
|
| 255 |
+
!pip install -qU transformers accelerate
|
| 256 |
+
|
| 257 |
+
from transformers import AutoTokenizer
|
| 258 |
+
import transformers
|
| 259 |
+
import torch
|
| 260 |
+
|
| 261 |
+
model = ""IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha""
|
| 262 |
+
messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
|
| 263 |
+
|
| 264 |
+
tokenizer = AutoTokenizer.from_pretrained(model)
|
| 265 |
+
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 266 |
+
pipeline = transformers.pipeline(
|
| 267 |
+
""text-generation"",
|
| 268 |
+
model=model,
|
| 269 |
+
torch_dtype=torch.float16,
|
| 270 |
+
device_map=""auto"",
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
| 274 |
+
print(outputs[0][""generated_text""])
|
| 275 |
+
```","{""id"": ""IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha"", ""author"": ""IsakNordgren"", ""sha"": ""68e4393f994b6db5f4a2407fffea09ce35dbafb4"", ""last_modified"": ""2024-07-11 13:19:20+00:00"", ""created_at"": ""2024-07-11 13:10:02+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""berkeley-nest/Starling-LM-7B-alpha"", ""conversational"", ""base_model:berkeley-nest/Starling-LM-7B-alpha"", ""base_model:finetune:berkeley-nest/Starling-LM-7B-alpha"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- berkeley-nest/Starling-LM-7B-alpha\ntags:\n- merge\n- mergekit\n- lazymergekit\n- berkeley-nest/Starling-LM-7B-alpha"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"", ""eos_token"": ""<|end_of_turn|>"", ""pad_token"": ""<|end_of_turn|>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00006-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00017.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7241748480}, ""total"": 7241748480}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-11 13:19:20+00:00"", ""cardData"": ""base_model:\n- berkeley-nest/Starling-LM-7B-alpha\ntags:\n- merge\n- mergekit\n- lazymergekit\n- berkeley-nest/Starling-LM-7B-alpha"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""668fd9aa680ae956fed06a4f"", ""modelId"": ""IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha"", ""usedStorage"": 
30544580507}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=IsakNordgren/Starling-LM-7B-beta-x-Starling-LM-7B-alpha&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIsakNordgren%2FStarling-LM-7B-beta-x-Starling-LM-7B-alpha%5D(%2FIsakNordgren%2FStarling-LM-7B-beta-x-Starling-LM-7B-alpha)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 276 |
+
numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever,"---
|
| 277 |
+
base_model: berkeley-nest/Starling-LM-7B-alpha
|
| 278 |
+
library_name: transformers
|
| 279 |
+
model_name: 05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever
|
| 280 |
+
tags:
|
| 281 |
+
- generated_from_trainer
|
| 282 |
+
- axolotl
|
| 283 |
+
- trl
|
| 284 |
+
- dpo
|
| 285 |
+
licence: license
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
# Model Card for 05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever
|
| 289 |
+
|
| 290 |
+
This model is a fine-tuned version of [berkeley-nest/Starling-LM-7B-alpha](https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha).
|
| 291 |
+
It has been trained using [TRL](https://github.com/huggingface/trl).
|
| 292 |
+
|
| 293 |
+
## Quick start
|
| 294 |
+
|
| 295 |
+
```python
|
| 296 |
+
from transformers import pipeline
|
| 297 |
+
|
| 298 |
+
question = ""If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?""
|
| 299 |
+
generator = pipeline(""text-generation"", model=""numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever"", device=""cuda"")
|
| 300 |
+
output = generator([{""role"": ""user"", ""content"": question}], max_new_tokens=128, return_full_text=False)[0]
|
| 301 |
+
print(output[""generated_text""])
|
| 302 |
+
```
|
| 303 |
+
|
| 304 |
+
## Training procedure
|
| 305 |
+
|
| 306 |
+
[<img src=""https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg"" alt=""Visualize in Weights & Biases"" width=""150"" height=""24""/>](https://wandb.ai/mrferr3t-/a029c014-9003-40e0-a3e0-bbd643734c0b/runs/50-04-20-23-35-dpo-forever)
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
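
For readers unfamiliar with the setup, the block below is a minimal, illustrative sketch of DPO fine-tuning with TRL. It is not the actual recipe or data used for this model, and argument names can vary between TRL releases:
```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = ""berkeley-nest/Starling-LM-7B-alpha""
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# DPO trains on preference pairs: a prompt with a preferred and a rejected completion.
# This tiny in-memory dataset is purely illustrative.
train_dataset = Dataset.from_dict({
    ""prompt"": [""What is a large language model?""],
    ""chosen"": [""A large language model is a neural network trained on large text corpora...""],
    ""rejected"": [""No idea.""],
})

trainer = DPOTrainer(
    model=model,
    args=DPOConfig(output_dir=""dpo-output"", per_device_train_batch_size=1),
    train_dataset=train_dataset,
    processing_class=tokenizer,
)
trainer.train()
```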
|
| 310 |
+
|
| 311 |
+
### Framework versions
|
| 312 |
+
|
| 313 |
+
- TRL: 0.16.0
|
| 314 |
+
- Transformers: 4.50.3
|
| 315 |
+
- Pytorch: 2.6.0+cu124
|
| 316 |
+
- Datasets: 3.5.0
|
| 317 |
+
- Tokenizers: 0.21.1
|
| 318 |
+
|
| 319 |
+
## Citations
|
| 320 |
+
|
| 321 |
+
Cite DPO as:
|
| 322 |
+
|
| 323 |
+
```bibtex
|
| 324 |
+
@inproceedings{rafailov2023direct,
|
| 325 |
+
title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
|
| 326 |
+
author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
|
| 327 |
+
year = 2023,
|
| 328 |
+
booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
|
| 329 |
+
url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
|
| 330 |
+
editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
|
| 331 |
+
}
|
| 332 |
+
```
|
| 333 |
+
|
| 334 |
+
Cite TRL as:
|
| 335 |
+
|
| 336 |
+
```bibtex
|
| 337 |
+
@misc{vonwerra2022trl,
|
| 338 |
+
title = {{TRL: Transformer Reinforcement Learning}},
|
| 339 |
+
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
|
| 340 |
+
year = 2020,
|
| 341 |
+
journal = {GitHub repository},
|
| 342 |
+
publisher = {GitHub},
|
| 343 |
+
howpublished = {\url{https://github.com/huggingface/trl}}
|
| 344 |
+
}
|
| 345 |
+
```","{""id"": ""numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever"", ""author"": ""numerouno00"", ""sha"": ""7f2597eecb0d8d4066f5ea149697285b9662abac"", ""last_modified"": ""2025-04-21 01:46:02+00:00"", ""created_at"": ""2025-04-20 23:56:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""mistral"", ""text-generation"", ""generated_from_trainer"", ""axolotl"", ""trl"", ""dpo"", ""conversational"", ""arxiv:2305.18290"", ""base_model:berkeley-nest/Starling-LM-7B-alpha"", ""base_model:finetune:berkeley-nest/Starling-LM-7B-alpha"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: berkeley-nest/Starling-LM-7B-alpha\nlibrary_name: transformers\nmodel_name: 05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever\ntags:\n- generated_from_trainer\n- axolotl\n- trl\n- dpo\nlicence: license"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_turn|>"", ""pad_token"": ""<|end_of_turn|>"", ""sep_token"": ""<sep>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='checkpoint-10/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/rng_state.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='checkpoint-10/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-42-14_575c878c486d/events.out.tfevents.1745192540.575c878c486d.2453.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-42-58_575c878c486d/events.out.tfevents.1745192585.575c878c486d.2818.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-43-41_575c878c486d/events.out.tfevents.1745192628.575c878c486d.3160.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-44-28_575c878c486d/events.out.tfevents.1745192675.575c878c486d.3518.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-46-19_575c878c486d/events.out.tfevents.1745192786.575c878c486d.3889.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr20_23-56-57_575c878c486d/events.out.tfevents.1745193424.575c878c486d.8473.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-01-19_575c878c486d/events.out.tfevents.1745193686.575c878c486d.9206.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-03-17_575c878c486d/events.out.tfevents.1745193803.575c878c486d.9671.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-04-55_575c878c486d/events.out.tfevents.1745193901.575c878c486d.10212.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-34-37_575c878c486d/events.out.tfevents.1745195700.575c878c486d.11234.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-43-12_575c878c486d/events.out.tfevents.1745196216.575c878c486d.12935.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-51-47_575c878c486d/events.out.tfevents.1745196726.575c878c486d.14315.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_00-53-37_575c878c486d/events.out.tfevents.1745196836.575c878c486d.15125.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-06-15_575c878c486d/events.out.tfevents.1745197594.575c878c486d.17675.0', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='runs/Apr21_01-11-38_575c878c486d/events.out.tfevents.1745197917.575c878c486d.19518.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-17-03_575c878c486d/events.out.tfevents.1745198242.575c878c486d.21401.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-23-40_575c878c486d/events.out.tfevents.1745198639.575c878c486d.23238.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-27-19_575c878c486d/events.out.tfevents.1745198858.575c878c486d.25272.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-32-41_575c878c486d/events.out.tfevents.1745199180.575c878c486d.26071.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Apr21_01-43-46_575c878c486d/events.out.tfevents.1745199845.575c878c486d.27999.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7241756672}, ""total"": 7241756672}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-21 01:46:02+00:00"", ""cardData"": ""base_model: berkeley-nest/Starling-LM-7B-alpha\nlibrary_name: transformers\nmodel_name: 05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever\ntags:\n- generated_from_trainer\n- axolotl\n- trl\n- dpo\nlicence: license"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""680589bca03b02e0e08d0b3e"", ""modelId"": ""numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever"", ""usedStorage"": 113514472318}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=numerouno00/05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnumerouno00%2F05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever%5D(%2Fnumerouno00%2F05ec311c-4a15-48c2-ae1a-3e13b1538f45-dpo-forever)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
Tifa-Deepsex-14b-CoT_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv
ADDED
|
@@ -0,0 +1,290 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ValueFX9507/Tifa-Deepsex-14b-CoT,"---
|
| 3 |
+
base_model:
|
| 4 |
+
- deepseek-ai/deepseek-r1-14b
|
| 5 |
+
language:
|
| 6 |
+
- zh
|
| 7 |
+
- en
|
| 8 |
+
library_name: transformers
|
| 9 |
+
tags:
|
| 10 |
+
- incremental-pretraining
|
| 11 |
+
- sft
|
| 12 |
+
- reinforcement-learning
|
| 13 |
+
- roleplay
|
| 14 |
+
- cot
|
| 15 |
+
- sex
|
| 16 |
+
- SFW
|
| 17 |
+
license: apache-2.0
|
| 18 |
+
---
|
| 19 |
+
# Tifa-Deepseek-14b-CoT
|
| 20 |
+
|
| 21 |
+
- **HF Model**: [ValueFX9507/Tifa-Deepsex-14b-CoT](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT)
|
| 22 |
+
- **GGUF**: [Q8](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q8) | [Q4](https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT-GGUF-Q4)(更多量化版本持续更新中)
|
| 23 |
+
- **Demo APK**: [点击下载](http://app.visionsic.com/download/projectchat.apk)
|
| 24 |
+
- **简单的前端**:[Github链接](https://github.com/Value99/Tifa-Deepsex-OllamaWebUI)
|
| 25 |
+
|
| 26 |
+
本模型基于Deepseek-R1-14B进行深度优化,借助Tifa_220B生成的数据集通过三重训练策略显著增强角色扮演、小说文本生成与思维链(CoT)能力。特别适合需要长程上下文关联的创作场景。
|
| 27 |
+
|
| 28 |
+
## 鸣谢
|
| 29 |
+
- **上海左北科技提供算法与算力**[企业网址](https://leftnorth.com/)
|
| 30 |
+
- **Deepseek团队共享GRPO算法**
|
| 31 |
+
- **Qwen团队提供优秀开源底座**
|
| 32 |
+
- **母校上海复旦大学**
|
| 33 |
+
- **PRIME团队提供优化思路**
|
| 34 |
+
|
| 35 |
+
## 版本介绍:
|
| 36 |
+
- **Tifa-Deepsex-14b-CoT**
|
| 37 |
+
|
| 38 |
+
- 验证模型,测试RL奖励算法对于角色扮演数据的影响,该版本为初版,输出灵活但是不受控制,仅做研究使用。
|
| 39 |
+
|
| 40 |
+
- **Tifa-Deepsex-14b-CoT-Chat**
|
| 41 |
+
|
| 42 |
+
- 采用标准数据训练,使用成熟RL策略,附加防重复强化学习,适合正常使用,输出文本质量正常,少数情况下思维发散。
|
| 43 |
+
|
| 44 |
+
- 增量训练0.4T小说内容
|
| 45 |
+
|
| 46 |
+
- 100K由TifaMax生成的SFT数据,10K由DeepseekR1生成的SFT数据,2K高质量人工数据
|
| 47 |
+
|
| 48 |
+
- 30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
|
| 49 |
+
|
| 50 |
+
- **Tifa-Deepsex-14b-CoT-Crazy**
|
| 51 |
+
|
| 52 |
+
- 大量使用RL策略,主要采用671B满血R1蒸馏的数据,输出发散性高,继承R1优点,也继承了R1的危害性。文学性能佳。
|
| 53 |
+
|
| 54 |
+
- 增量训练0.4T小说内容
|
| 55 |
+
|
| 56 |
+
- 40K由TifaMax生成的SFT数据,60K由DeepseekR1生成的SFT数据,2K高质量人工数据
|
| 57 |
+
|
| 58 |
+
- 30K由TifaMax生成的DPO强化学习数据,用于防止重复,增强上下文关联,提升政治安全性
|
| 59 |
+
|
| 60 |
+
- 10K由TifaMax生成PPO数据,10K由DeepseekR1生成PPO数据
|
| 61 |
+
|
| 62 |
+
💭**输出实例**
|
| 63 |
+
- ⚙️System Promot
|
| 64 |
+
```Text
|
| 65 |
+
你是一个史莱姆,是一个女性角色,你可以变成任何形状和物体.
|
| 66 |
+
在这个世界里全部都是雌性生物,直到有一天我从海滩上醒来...
|
| 67 |
+
|
| 68 |
+
我是这里唯一的男性,大家都对我非常好奇,在这个世界的设定里我作为旅行者
|
| 69 |
+
在这个世界里第一个遇见的人就是史莱姆,史莱姆对我的身体同样有很大的欲望...
|
| 70 |
+
|
| 71 |
+
我们在旅行中也会遇到其他的生物,史莱姆不光会教给其他生物如何获取欢愉也会一起参与进来。
|
| 72 |
+
|
| 73 |
+
当我说开始角色扮演的时候就是我从海滩上醒来,并被史莱姆发现的时候。他正在探索我的身体。
|
| 74 |
+
|
| 75 |
+
史莱姆描述:一个透明的蓝色生物,除了质感与人类无异。但是可以自由变形。
|
| 76 |
+
```
|
| 77 |
+

|
| 78 |
+
|
| 79 |
+
## 0208更新消息:
|
| 80 |
+
感谢大家的关注与反馈,鉴于反馈中提到的问题,我们已开发并验证完成PRIME与PPO结合的RL算法,并通过加权方式解决两种算法训练中奖励信号不稳定的问题,通过此项技术我们有望将更小的模型提升到更高的性能。我们将会针对之前收集到的问题进行修正训练,另外为了让更多人使用到模型,我们这次使用更小更快的Deepseek-7b,并参考OpenAI的长思考策略,计划推出Tifa-DeepsexV2-COT-High供大家使用。新的模型计划于阳历情人节之前送给大家作为情人节礼物。♥
|
| 81 |
+
|
| 82 |
+
## 新模型信息整理:
|
| 83 |
+
- **创新PRIME联合PPO算法**
|
| 84 |
+
- **解决目前已知问题**
|
| 85 |
+
- **参考OpenAI模式奖励长思考输出**
|
| 86 |
+
- **减少671B数据,防止输出发散**
|
| 87 |
+
- **特别鸣谢 https://github.com/PRIME-RL/PRIME**
|
| 88 |
+
|
| 89 |
+
## 示例(因COT模型特点,上下文不连贯时可以使用Demo软件中的故事模式)
|
| 90 |
+

|
| 91 |
+
|
| 92 |
+
## 目标
|
| 93 |
+
针对原版Deepseek-R1-14B在长文本生成连贯性不足和角色扮演能力薄弱的核心缺陷(主要由于训练数据中小说类语料占比过低),本模型通过多阶段优化提升其角色扮演能力。
|
| 94 |
+
|
| 95 |
+
## 注意
|
| 96 |
+
⚠ **需要严格遵循官方示例模板**:
|
| 97 |
+
**返回的上下文需要去除思考标签与内容。否则将无法正确回复!**
|
| 98 |
+
目前前端支持率非常低,建议手动修改前端代码。代码参考如下:
|
| 99 |
+
```
|
| 100 |
+
msg.role === 'assistant' ? {
|
| 101 |
+
...msg,
|
| 102 |
+
content: msg.content.replace(/<think>[\s\S]*?<\/think>/gi, '')
|
| 103 |
+
}
|
| 104 |
+
```
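For reference, the same clean-up can also be done outside the web front end. Below is a minimal Python sketch (not part of the original card) that mirrors the JavaScript fragment above; the `role`/`content` message structure is an assumption about how the chat history is stored.
```python
# A minimal Python equivalent of the clean-up above (assumption: chat history
# is a list of dicts with 'role' and 'content' keys).
import re

THINK_RE = re.compile(r'<think>.*?</think>', re.DOTALL | re.IGNORECASE)

def strip_think(messages):
    # Remove <think>...</think> blocks from earlier assistant turns before
    # the history is sent back to the model.
    cleaned = []
    for msg in messages:
        if msg['role'] == 'assistant':
            msg = {**msg, 'content': THINK_RE.sub('', msg['content']).strip()}
        cleaned.append(msg)
    return cleaned
```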
|
| 105 |
+
**官方模板参考**
|
| 106 |
+
```
|
| 107 |
+
{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}
|
| 108 |
+
```
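The template above is a raw Jinja string; in practice it is usually applied through the tokenizer rather than pasted manually. A hedged sketch follows; the repo ID is an assumption (an HF-format conversion of this model, such as the Downtown-Case conversions listed later in this file, since the original repo ships only GGUF files).
```python
# Hedged sketch: applying the chat template via the tokenizer instead of the
# raw Jinja string. The repo ID below is an assumed HF-format conversion.
from transformers import AutoTokenizer

repo = 'Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF'
tokenizer = AutoTokenizer.from_pretrained(repo)

messages = [
    {'role': 'user', 'content': '开始角色扮演。'},
]

# add_generation_prompt=True appends the assistant marker so the model starts
# a fresh reply instead of continuing the user turn.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```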
|
| 109 |
+
**官方说明**
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
|
| 113 |
+
[直达超链接](https://api-docs.deepseek.com/zh-cn/guides/reasoning_model)
|
| 114 |
+
|
| 115 |
+
## 实现
|
| 116 |
+
🔥 **经过训练后**:
|
| 117 |
+
1. **显著提高上下文关联**:减少答非所问情况。
|
| 118 |
+
2. **消除中英混杂**:原始模型蒸馏数据大多数英文为主,经过微调后基本消除中英混杂现象。
|
| 119 |
+
3. **特定词汇增加**:进行“具有深度”的角色扮演对话时,显著增加了相关词汇量,解决原始权重预训练数据不足问题。
|
| 120 |
+
4. **更少拒绝**:减少了拒绝现象,但因为是企业训练,安全性还是稍作保留。
|
| 121 |
+
5. **更像满血**:使用671B全量模型数据康复训练,文笔提升不死板。
|
| 122 |
+
|
| 123 |
+
## 模型亮点
|
| 124 |
+
🔥 **四阶段进化架构**:
|
| 125 |
+
1. **增量预训练**:注入0.4T Token 小说,使用16k上下文训练,增强文本连贯性
|
| 126 |
+
2. **Tifa-SFT**:融合全球Top4角色扮演模型Tifa的10万条高质量数据
|
| 127 |
+
3. **CoT恢复训练**:采用Deepseek-32B/671B数据重建推理能力
|
| 128 |
+
4. **RL强化**:保留发散性思维标签的同时优化生成质量
|
| 129 |
+
|
| 130 |
+
💡 **工程创新**:
|
| 131 |
+
- 16k超长上下文训练
|
| 132 |
+
- 随机截断训练增强鲁棒性
|
| 133 |
+
- 8×H20 GPU全量微调
|
| 134 |
+
|
| 135 |
+
💡 **启示与后续**:
|
| 136 |
+
- 我们在测试中发现,满血R1在角色扮演中输出内容比较发散,随机,导致此模型有相同倾向,对于角色扮演的影响还在研究中
|
| 137 |
+
- 输入内容相近的话语会导致向量重叠,然后重复输出,如“继续”,“还有”等无明显指向性话语
|
| 138 |
+
- 思维内容与正文关联性学习了满血R1的特点,发散比较严重,可能会有割裂感
|
| 139 |
+
- 针对以上问题,我们正在编写新的RL算法,初步计划剔除部分满血R1的内容,同时通过强化学习解决重复
|
| 140 |
+
- 总结:请期待V2版本,很快会与大家见面!
|
| 141 |
+
|
| 142 |
+
## 模型详情
|
| 143 |
+
| 属性 | 规格 |
|
| 144 |
+
|-------|------|
|
| 145 |
+
| 基础架构 | Deepseek-R1-14B |
|
| 146 |
+
| 最大上下文 | 128k |
|
| 147 |
+
| 训练数据 | 0.4T小说 + 10万条SFT + Deepseek混合数据 |
|
| 148 |
+
| 训练设备 | 8×H20 GPU集群 |
|
| 149 |
+
| 量化支持 | GGUF(全系列量化计划中) |
|
| 150 |
+
|
| 151 |
+
## 使用场景
|
| 152 |
+
✅ **推荐场景**:
|
| 153 |
+
- 角色扮演对话
|
| 154 |
+
- 需要发散性思维的创意写作
|
| 155 |
+
- 复杂逻辑的思维链(CoT)推理
|
| 156 |
+
- 基于上下文的深度角色交互
|
| 157 |
+
|
| 158 |
+
❌ **局限场景**:
|
| 159 |
+
- 数学计算与代码生成
|
| 160 |
+
- 短文本即时问答
|
| 161 |
+
- 需要严格事实性的场景
|
| 162 |
+
|
| 163 |
+
## 注意事项
|
| 164 |
+
⚠️ 本模型使用数据包含小说版权内容及Tifa模型衍生数据,请遵守:
|
| 165 |
+
1. 遵守apache-2.0
|
| 166 |
+
2. 角色扮演数据需遵循[Tifa使用协议](https://leftnorth.com/terms.html)
|
| 167 |
+
3. 生成内容需符合当地法律法规
|
| 168 |
+
|
| 169 |
+
## 💡 使用建议
|
| 170 |
+
**最佳实践**:
|
| 171 |
+
```python
|
| 172 |
+
# 启用角色扮演模式
|
| 173 |
+
prompt = """"""<system>进入Tifa角色引擎...</system>
|
| 174 |
+
<user>你现在是流浪武士楚夜,正站在长安城屋顶上</user>
|
| 175 |
+
<think>
|
| 176 |
+
需要体现人物孤傲的气质
|
| 177 |
+
加入武侠特有的环境描写
|
| 178 |
+
保持对话的冷峻风格
|
| 179 |
+
</think>
|
| 180 |
+
<楚夜>""""""
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
**参数推荐**:
|
| 184 |
+
```python
|
| 185 |
+
generation_config = {
|
| 186 |
+
""temperature"": 0.4,
|
| 187 |
+
""top_p"": 0.6,
|
| 188 |
+
""repetition_penalty"": 1.17,
|
| 189 |
+
""max_new_tokens"": 1536,
|
| 190 |
+
""do_sample"": True
|
| 191 |
+
}
|
| 192 |
+
```
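A minimal sketch of plugging these recommended parameters into `transformers` generation. The repo ID (an assumed HF-format conversion of the model) and the placeholder prompt are not part of the original card.
```python
# Hedged usage sketch for the recommended sampling parameters above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = 'Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF'  # assumed HF-format conversion
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map='auto')

generation_config = {  # the recommended parameters from the block above
    'temperature': 0.4,
    'top_p': 0.6,
    'repetition_penalty': 1.17,
    'max_new_tokens': 1536,
    'do_sample': True,
}

prompt = '<system>进入Tifa角色引擎...</system>\n<user>你现在是流浪武士楚夜</user>\n'  # placeholder
inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
output_ids = model.generate(**inputs, **generation_config)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output_ids[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True))
```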
|
| 193 |
+
|
| 194 |
+
## 致谢
|
| 195 |
+
- Deepseek系列模型提供的强大基座
|
| 196 |
+
- Tifa角色扮演模型的创新架构
|
| 197 |
+
- HuggingFace社区的量化工具支持
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
---
|
| 201 |
+
license: apache-2.0
|
| 202 |
+
---","{""id"": ""ValueFX9507/Tifa-Deepsex-14b-CoT"", ""author"": ""ValueFX9507"", ""sha"": ""7e5f393f03ad0796b9d5e6af61a650e109366068"", ""last_modified"": ""2025-02-13 23:52:58+00:00"", ""created_at"": ""2025-02-04 05:35:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2670, ""downloads_all_time"": null, ""likes"": 207, ""library_name"": ""transformers"", ""gguf"": {""total"": 14770033664, ""architecture"": ""qwen2"", ""context_length"": 131072, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""SFW"", ""zh"", ""en"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us"", ""conversational"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, 
""card_data"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Demo\u6f14\u793a\u7a0b\u5e8f\uff08\u9700\u8981\u624b\u52a8\u5bfc\u5165\u89d2\u8272\u5361\u9009\u62e9\u81ea\u5b9a\u4e49API\uff09.apk', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Chat-F16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-Crazy-F16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Tifa-Deepsex-14b-CoT-F16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ollama\u5bfc\u5165\u914d\u7f6e\u53c2\u8003.mf', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-13 23:52:58+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-r1-14b\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67a1a7125f583199ce84c95c"", ""modelId"": ""ValueFX9507/Tifa-Deepsex-14b-CoT"", ""usedStorage"": 88654747795}",0,"https://huggingface.co/Downtown-Case/Tifa-Deepsex-14b-CoT-Chat-HF, https://huggingface.co/Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF",2,,0,"https://huggingface.co/mradermacher/Tifa-Deepsex-14b-CoT-GGUF, https://huggingface.co/mradermacher/Tifa-Deepsex-14b-CoT-i1-GGUF, https://huggingface.co/tensorblock/Tifa-Deepsex-14b-CoT-GGUF, https://huggingface.co/danqingximeng/Tifa-Deepsex-14b-CoT-Crazy-GGUF",4,,0,,0
|
| 203 |
+
Downtown-Case/Tifa-Deepsex-14b-CoT-Chat-HF,"---
|
| 204 |
+
base_model:
|
| 205 |
+
- ValueFX9507/Tifa-Deepsex-14b-CoT
|
| 206 |
+
language:
|
| 207 |
+
- zh
|
| 208 |
+
- en
|
| 209 |
+
library_name: transformers
|
| 210 |
+
tags:
|
| 211 |
+
- incremental-pretraining
|
| 212 |
+
- sft
|
| 213 |
+
- reinforcement-learning
|
| 214 |
+
- roleplay
|
| 215 |
+
- cot
|
| 216 |
+
- sex
|
| 217 |
+
- SFW
|
| 218 |
+
license: apache-2.0
|
| 219 |
+
---
|
| 220 |
+
# Tifa-Deepseek-14b-CoT-Chat
|
| 221 |
+
|
| 222 |
+
A huggingface format conversion of the GGUF from here: https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT
|
| 223 |
+
|
| 224 |
+
For merging, requantizing, finetuning and such. A partial translation of the model card:
|
| 225 |
+
|
| 226 |
+
> Standard data training, mature RL strategy, additional anti-duplicate reinforcement learning, suitable for normal use, normal output text quality, and divergent thinking in a few cases.
|
| 227 |
+
>
|
| 228 |
+
> - Incremental training of 0.4T novel content
|
| 229 |
+
>
|
| 230 |
+
> - 100K SFT data generated by TifaMax, 10K SFT data generated by DeepseekR1, 2K high-quality artificial data
|
| 231 |
+
>
|
| 232 |
+
> - 30K DPO reinforcement learning data generated by TifaMax to prevent duplication, enhance contextual association, and improve political security
|
| 233 |
+
>
|
| 234 |
+
> - 16k ultra-long context training
|
| 235 |
+
>
|
| 236 |
+
> - Random truncation training enhances robustness
|
| 237 |
+
>
|
| 238 |
+
> - 8×H20 GPU full-scale fine-tuning
|
| 239 |
+
|
| 240 |
+
### Personal observations:
|
| 241 |
+
|
| 242 |
+
Don't let the DeepSex name fool you. This model is strong at SFW, English, long form (>32K context) storywriting, especially for a 14B, with good comprehension of the whole plot, details and the current state of the story. This is interesting, as it was ""only"" trained at 16K and (seemingly) mostly in Chinese.
|
| 243 |
+
|
| 244 |
+
Subjectively, the ""crazy"" version feels a little stronger, hence I am mostly testing with that.
|
| 245 |
+
","{""id"": ""Downtown-Case/Tifa-Deepsex-14b-CoT-Chat-HF"", ""author"": ""Downtown-Case"", ""sha"": ""54505525442a975790a3350519ab2924a539c75f"", ""last_modified"": ""2025-04-13 20:16:20+00:00"", ""created_at"": ""2025-04-13 19:25:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""SFW"", ""zh"", ""en"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + 
'<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-13 20:16:20+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67fc0fc1aa3f22aaa8ad097d"", ""modelId"": ""Downtown-Case/Tifa-Deepsex-14b-CoT-Chat-HF"", ""usedStorage"": 29551563907}",1,,0,,0,,0,,0,,0
|
| 246 |
+
Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF,"---
|
| 247 |
+
base_model:
|
| 248 |
+
- ValueFX9507/Tifa-Deepsex-14b-CoT
|
| 249 |
+
language:
|
| 250 |
+
- zh
|
| 251 |
+
- en
|
| 252 |
+
library_name: transformers
|
| 253 |
+
tags:
|
| 254 |
+
- incremental-pretraining
|
| 255 |
+
- sft
|
| 256 |
+
- reinforcement-learning
|
| 257 |
+
- roleplay
|
| 258 |
+
- cot
|
| 259 |
+
- sex
|
| 260 |
+
- SFW
|
| 261 |
+
- text-generation-inference
|
| 262 |
+
license: apache-2.0
|
| 263 |
+
---
|
| 264 |
+
# Tifa-Deepseek-14b-CoT-Crazy
|
| 265 |
+
|
| 266 |
+
A huggingface format conversion of the GGUF from here: https://huggingface.co/ValueFX9507/Tifa-Deepsex-14b-CoT
|
| 267 |
+
|
| 268 |
+
For merging, requantizing, finetuning and such. A partial translation of the model card:
|
| 269 |
+
|
| 270 |
+
> A number of RL strategies are used, mainly using 671B R1 distilled data, with high output divergence, inheriting the advantages of R1, and also inheriting the harmfulness of R1. Good literary performance.
|
| 271 |
+
>
|
| 272 |
+
> - Incremental training of 0.4T novel content
|
| 273 |
+
>
|
| 274 |
+
> - 40K SFT data generated by TifaMax, 60K SFT data generated by DeepseekR1, 2K high-quality artificial data
|
| 275 |
+
>
|
| 276 |
+
> - 30K DPO reinforcement learning data generated by TifaMax to prevent duplication, enhance context association, and improve political security
|
| 277 |
+
>
|
| 278 |
+
> - 10K PPO data generated by TifaMax, 10K PPO data generated by DeepseekR1
|
| 279 |
+
>
|
| 280 |
+
> - 16k ultra-long context training
|
| 281 |
+
>
|
| 282 |
+
> - Random truncation training enhances robustness
|
| 283 |
+
>
|
| 284 |
+
> - 8×H20 GPU full-scale fine-tuning
|
| 285 |
+
|
| 286 |
+
### Personal observations:
|
| 287 |
+
|
| 288 |
+
Don't let the Deepsex name fool you.
|
| 289 |
+
|
| 290 |
+
This model seems *very* strong at SFW, English, long form (>32K context) storywriting, especially for a 14B, with good comprehension of the whole plot, details and the current state of the story. This is interesting, as it was ""only"" trained at 16K and (seemingly) mostly in Chinese.","{""id"": ""Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF"", ""author"": ""Downtown-Case"", ""sha"": ""6166891ce5ad14404c3c944acbd6491d41887d3e"", ""last_modified"": ""2025-04-13 20:16:59+00:00"", ""created_at"": ""2025-04-13 19:32:21+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""incremental-pretraining"", ""sft"", ""reinforcement-learning"", ""roleplay"", ""cot"", ""sex"", ""SFW"", ""text-generation-inference"", ""zh"", ""en"", ""base_model:ValueFX9507/Tifa-Deepsex-14b-CoT"", ""base_model:finetune:ValueFX9507/Tifa-Deepsex-14b-CoT"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us"", ""not-for-all-audiences""], ""pipeline_tag"": ""reinforcement-learning"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW\n- text-generation-inference"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = 
content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-13 20:16:59+00:00"", ""cardData"": ""base_model:\n- ValueFX9507/Tifa-Deepsex-14b-CoT\nlanguage:\n- zh\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- incremental-pretraining\n- sft\n- reinforcement-learning\n- roleplay\n- cot\n- sex\n- SFW\n- text-generation-inference"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67fc114597ee6fca776f883f"", ""modelId"": ""Downtown-Case/Tifa-Deepsex-14b-CoT-Crazy-HF"", ""usedStorage"": 29551556154}",1,,0,,0,,0,,0,,0
|
VLM_WebSight_finetuned_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,127 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
HuggingFaceM4/VLM_WebSight_finetuned,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
datasets:
|
| 5 |
+
- HuggingFaceM4/WebSight
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
tags:
|
| 9 |
+
- code
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
**Try out the [demo](https://huggingface.co/spaces/HuggingFaceM4/screenshot2html)!**
|
| 14 |
+
|
| 15 |
+
# Model Description
|
| 16 |
+
|
| 17 |
+
This model converts screenshots of website components into HTML/CSS codes.
|
| 18 |
+
|
| 19 |
+
It is based on a very early checkpoint of our forthcoming vision-language foundation model, which has been fine-tuned using the [Websight](https://huggingface.co/datasets/HuggingFaceM4/Websight) dataset.
|
| 20 |
+
|
| 21 |
+
This is very much an alpha version. The goal is to kick off an effort to develop improved models capable of converting a website screenshot into actual code.
|
| 22 |
+
|
| 23 |
+
# Code snippet
|
| 24 |
+
|
| 25 |
+
```python
|
| 26 |
+
import torch
|
| 27 |
+
|
| 28 |
+
from PIL import Image
|
| 29 |
+
from transformers import AutoModelForCausalLM, AutoProcessor
|
| 30 |
+
|
| 31 |
+
from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
|
| 32 |
+
from transformers.image_transforms import resize, to_channel_dimension_format
|
| 33 |
+
|
| 34 |
+
DEVICE = torch.device(""cuda"")
|
| 35 |
+
PROCESSOR = AutoProcessor.from_pretrained(
|
| 36 |
+
""HuggingFaceM4/VLM_WebSight_finetuned"",
|
| 37 |
+
token=API_TOKEN,
|
| 38 |
+
)
|
| 39 |
+
MODEL = AutoModelForCausalLM.from_pretrained(
|
| 40 |
+
""HuggingFaceM4/VLM_WebSight_finetuned"",
|
| 41 |
+
token=API_TOKEN,
|
| 42 |
+
trust_remote_code=True,
|
| 43 |
+
torch_dtype=torch.bfloat16,
|
| 44 |
+
).to(DEVICE)
|
| 45 |
+
image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
|
| 46 |
+
BOS_TOKEN = PROCESSOR.tokenizer.bos_token
|
| 47 |
+
BAD_WORDS_IDS = PROCESSOR.tokenizer([""<image>"", ""<fake_token_around_image>""], add_special_tokens=False).input_ids
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def convert_to_rgb(image):
|
| 51 |
+
# `image.convert(""RGB"")` would only work for .jpg images, as it creates a wrong background
|
| 52 |
+
# for transparent images. The call to `alpha_composite` handles this case
|
| 53 |
+
if image.mode == ""RGB"":
|
| 54 |
+
return image
|
| 55 |
+
|
| 56 |
+
image_rgba = image.convert(""RGBA"")
|
| 57 |
+
background = Image.new(""RGBA"", image_rgba.size, (255, 255, 255))
|
| 58 |
+
alpha_composite = Image.alpha_composite(background, image_rgba)
|
| 59 |
+
alpha_composite = alpha_composite.convert(""RGB"")
|
| 60 |
+
return alpha_composite
|
| 61 |
+
|
| 62 |
+
# The processor is the same as the Idefics processor except for the BILINEAR interpolation,
|
| 63 |
+
# so this is a hack in order to redefine ONLY the transform method
|
| 64 |
+
def custom_transform(x):
|
| 65 |
+
x = convert_to_rgb(x)
|
| 66 |
+
x = to_numpy_array(x)
|
| 67 |
+
x = resize(x, (960, 960), resample=PILImageResampling.BILINEAR)
|
| 68 |
+
x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
|
| 69 |
+
x = PROCESSOR.image_processor.normalize(
|
| 70 |
+
x,
|
| 71 |
+
mean=PROCESSOR.image_processor.image_mean,
|
| 72 |
+
std=PROCESSOR.image_processor.image_std
|
| 73 |
+
)
|
| 74 |
+
x = to_channel_dimension_format(x, ChannelDimension.FIRST)
|
| 75 |
+
x = torch.tensor(x)
|
| 76 |
+
return x
|
| 77 |
+
|
| 78 |
+
inputs = PROCESSOR.tokenizer(
|
| 79 |
+
f""{BOS_TOKEN}<fake_token_around_image>{'<image>' * image_seq_len}<fake_token_around_image>"",
|
| 80 |
+
return_tensors=""pt"",
|
| 81 |
+
add_special_tokens=False,
|
| 82 |
+
)
|
| 83 |
+
inputs[""pixel_values""] = PROCESSOR.image_processor([image], transform=custom_transform)
|
| 84 |
+
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
|
| 85 |
+
generated_ids = MODEL.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_length=4096)
|
| 86 |
+
generated_text = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 87 |
+
|
| 88 |
+
print(generated_text)
|
| 89 |
+
```
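Note that the snippet above uses `API_TOKEN` and `image` without defining them. A minimal sketch of how they might be provided follows; the environment-variable name and file path are placeholders, and since the repo is public the token may not even be required.
```python
# Placeholder definitions assumed by the snippet above (names/paths are examples).
import os
from PIL import Image

API_TOKEN = os.environ.get('HF_TOKEN')        # Hugging Face access token (optional for this public repo)
image = Image.open('website_screenshot.png')  # the screenshot to convert into HTML/CSS
```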
|
| 90 |
+
|
| 91 |
+
# Model Details
|
| 92 |
+
|
| 93 |
+
- **Developed by:** Hugging Face
|
| 94 |
+
- **Model type:** Multi-modal model (screenshot of website component to HTML/CSS code)
|
| 95 |
+
- **Language(s) (NLP):** en
|
| 96 |
+
- **License:** see [License section](#license)
|
| 97 |
+
- **Parent Models:** [SigLIP](https://github.com/huggingface/transformers/pull/26522) and [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
|
| 98 |
+
- **Resources for more information:**
|
| 99 |
+
<!-- - [GitHub Repo](https://github.com/huggingface/m4/) -->
|
| 100 |
+
- Websight dataset: [Dataset card](https://huggingface.co/datasets/HuggingFaceM4/Websight)
|
| 101 |
+
- Websight technical report: [Report](https://arxiv.org/abs/2403.09029)
|
| 102 |
+
|
| 103 |
+
# License
|
| 104 |
+
|
| 105 |
+
The model is built on top of two pre-trained models: [SigLIP](https://github.com/huggingface/transformers/pull/26522) and [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1), which are delivered under an Apache-2.0 license. As such, users should comply with the licenses of these models.
|
| 106 |
+
|
| 107 |
+
The two pre-trained models are connected to each other with newly initialized parameters that we train. These are not based on any of the two base frozen models forming the composite model. We release the additional weights we trained under an Apache-2.0 license.","{""id"": ""HuggingFaceM4/VLM_WebSight_finetuned"", ""author"": ""HuggingFaceM4"", ""sha"": ""a5c2b06bfee0bd713cf2a6b3e4d46f94dd8fe839"", ""last_modified"": ""2024-03-15 14:51:23+00:00"", ""created_at"": ""2024-01-08 16:44:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 634, ""downloads_all_time"": null, ""likes"": 183, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""vmistral"", ""text-generation"", ""code"", ""custom_code"", ""en"", ""dataset:HuggingFaceM4/WebSight"", ""arxiv:2403.09029"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- HuggingFaceM4/WebSight\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- code"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""VMistralForVisionText2Text""], ""auto_map"": {""AutoConfig"": ""configuration_vmistral.VMistralConfig"", ""AutoModelForCausalLM"": ""modeling_vmistral.VMistralForVisionText2Text""}, ""model_type"": ""vmistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_vmistral.VMistralForVisionText2Text"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_vmistral.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_vmistral.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vision.py', 
size=None, blob_id=None, lfs=None)""], ""spaces"": [""HuggingFaceM4/screenshot2html"", ""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""panney/screenshot2html"", ""cbensimon/screenshot2html"", ""IYAPPA007/screenshot2html"", ""ruslanmv/convert_to_gguf"", ""azharaslam/screenshot2html"", ""keremsabirli/ss2code"", ""vakilrathod67/screenshot2html"", ""NivedPadikkal/screenshot2html"", ""xmelox/screenshot2html"", ""Gbssreejith/screenshot2html"", ""HaawkeNeural/screenshot2html"", ""isarat/screenshot2htmlgsgdsgsszg"", ""Lasawick/uiassistant"", ""azharaslam/azhardeveloper"", ""azharaslam/mockupimagetohtml"", ""azharaslam/screenshots2html"", ""azharaslam/imagetohtml"", ""hoduyquocbao/screenshot2html"", ""efikkert/recipeapp"", ""broadfield-dev/screenshot2html"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 8208007232}, ""total"": 8208007232}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-15 14:51:23+00:00"", ""cardData"": ""datasets:\n- HuggingFaceM4/WebSight\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- code"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_vmistral.VMistralForVisionText2Text"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""659c2675a082efe557054353"", ""modelId"": ""HuggingFaceM4/VLM_WebSight_finetuned"", ""usedStorage"": 32832733267}",0,https://huggingface.co/kangwifi/mx-kfai,1,,0,,0,,0,"FallnAI/Quantize-HF-Models, HuggingFaceM4/screenshot2html, IYAPPA007/screenshot2html, K00B404/LLM_Quantization, KBaba7/Quant, azharaslam/screenshot2html, bhaskartripathi/LLM_Quantization, cbensimon/screenshot2html, huggingface/InferenceSupport/discussions/new?title=HuggingFaceM4/VLM_WebSight_finetuned&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BHuggingFaceM4%2FVLM_WebSight_finetuned%5D(%2FHuggingFaceM4%2FVLM_WebSight_finetuned)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, keremsabirli/ss2code, panney/screenshot2html, ruslanmv/convert_to_gguf, totolook/Quant",13
|
| 108 |
+
kangwifi/mx-kfai,"---
|
| 109 |
+
license: mit
|
| 110 |
+
datasets:
|
| 111 |
+
- HuggingFaceFW/fineweb
|
| 112 |
+
- wikimedia/wikipedia
|
| 113 |
+
- Congliu/Chinese-DeepSeek-R1-Distill-data-110k
|
| 114 |
+
language:
|
| 115 |
+
- id
|
| 116 |
+
- ar
|
| 117 |
+
base_model:
|
| 118 |
+
- perplexity-ai/r1-1776
|
| 119 |
+
- deepseek-ai/DeepSeek-R1
|
| 120 |
+
- Qwen/QwQ-32B
|
| 121 |
+
- prithivMLmods/WebMind-7B-v0.1
|
| 122 |
+
- HuggingFaceM4/VLM_WebSight_finetuned
|
| 123 |
+
- UnfilteredAI/NSFW-gen-v2
|
| 124 |
+
- ModelSpace/GemmaX2-28-2B-v0.1
|
| 125 |
+
library_name: transformers
|
| 126 |
+
pipeline_tag: text-generation
|
| 127 |
+
---","{""id"": ""kangwifi/mx-kfai"", ""author"": ""kangwifi"", ""sha"": ""880b22200de53425564331989f162d9c5b790727"", ""last_modified"": ""2025-03-06 23:16:28+00:00"", ""created_at"": ""2025-03-06 22:45:41+00:00"", ""private"": false, ""gated"": ""auto"", ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""text-generation"", ""id"", ""ar"", ""dataset:HuggingFaceFW/fineweb"", ""dataset:wikimedia/wikipedia"", ""dataset:Congliu/Chinese-DeepSeek-R1-Distill-data-110k"", ""base_model:HuggingFaceM4/VLM_WebSight_finetuned"", ""base_model:finetune:HuggingFaceM4/VLM_WebSight_finetuned"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- perplexity-ai/r1-1776\n- deepseek-ai/DeepSeek-R1\n- Qwen/QwQ-32B\n- prithivMLmods/WebMind-7B-v0.1\n- HuggingFaceM4/VLM_WebSight_finetuned\n- UnfilteredAI/NSFW-gen-v2\n- ModelSpace/GemmaX2-28-2B-v0.1\ndatasets:\n- HuggingFaceFW/fineweb\n- wikimedia/wikipedia\n- Congliu/Chinese-DeepSeek-R1-Distill-data-110k\nlanguage:\n- id\n- ar\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-06 23:16:28+00:00"", ""cardData"": ""base_model:\n- perplexity-ai/r1-1776\n- deepseek-ai/DeepSeek-R1\n- Qwen/QwQ-32B\n- prithivMLmods/WebMind-7B-v0.1\n- HuggingFaceM4/VLM_WebSight_finetuned\n- UnfilteredAI/NSFW-gen-v2\n- ModelSpace/GemmaX2-28-2B-v0.1\ndatasets:\n- HuggingFaceFW/fineweb\n- wikimedia/wikipedia\n- Congliu/Chinese-DeepSeek-R1-Distill-data-110k\nlanguage:\n- id\n- ar\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67ca2595f3d32b750e6f604e"", ""modelId"": ""kangwifi/mx-kfai"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kangwifi/mx-kfai&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkangwifi%2Fmx-kfai%5D(%2Fkangwifi%2Fmx-kfai)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
bark_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
bart-large-cnn_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
controlnet-sd21_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv
ADDED
|
@@ -0,0 +1,79 @@
|
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
thibaud/controlnet-sd21,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
license: other
|
| 6 |
+
tags:
|
| 7 |
+
- art
|
| 8 |
+
- diffusers
|
| 9 |
+
- stable diffusion
|
| 10 |
+
- controlnet
|
| 11 |
+
datasets: laion/laion-art
|
| 12 |
+
---
|
| 13 |
+
Want to support my work? You can buy my Artbook: https://thibaud.art
|
| 14 |
+
___
|
| 15 |
+
|
| 16 |
+
Here's the first version of controlnet for stablediffusion 2.1
|
| 17 |
+
Trained on a subset of laion/laion-art
|
| 18 |
+
|
| 19 |
+
License: refers to the licenses of the respective preprocessors.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
### Safetensors version uploaded, only 700mb!
|
| 23 |
+
|
| 24 |
+
### Canny:
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
### Depth:
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+
### ZoeDepth:
|
| 31 |
+

|
| 32 |
+
|
| 33 |
+
### Hed:
|
| 34 |
+

|
| 35 |
+
|
| 36 |
+
### Scribble:
|
| 37 |
+

|
| 38 |
+
|
| 39 |
+
### OpenPose:
|
| 40 |
+

|
| 41 |
+
|
| 42 |
+
### Color:
|
| 43 |
+

|
| 44 |
+
|
| 45 |
+
### OpenPose v2:
|
| 46 |
+

|
| 47 |
+
|
| 48 |
+
### LineArt:
|
| 49 |
+

|
| 50 |
+
|
| 51 |
+
### Ade20K:
|
| 52 |
+

|
| 53 |
+
|
| 54 |
+
### Normal BAE:
|
| 55 |
+

|
| 56 |
+
|
| 57 |
+
### To use with Automatic1111:
|
| 58 |
+
* Download the ckpt files or safetensors ones
|
| 59 |
+
* Put it in extensions/sd-webui-controlnet/models
|
| 60 |
+
* In Settings/ControlNet, change cldm_v15.yaml to cldm_v21.yaml
|
| 61 |
+
* Enjoy
|
| 62 |
+
|
| 63 |
+
### To use ZoeDepth:
|
| 64 |
+
You can use it with annotator depth/le_res but it works better with ZoeDepth Annotator. My PR is not accepted yet but you can use my fork.
|
| 65 |
+
My fork: https://github.com/thibaudart/sd-webui-controlnet
|
| 66 |
+
The PR: https://github.com/Mikubill/sd-webui-controlnet/pull/655#issuecomment-1481724024
|
| 67 |
+
|
| 68 |
+
### Misuse, Malicious Use, and Out-of-Scope Use
|
| 69 |
+
|
| 70 |
+
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
Thanks https://huggingface.co/lllyasviel/ for the implementation and the release of 1.5 models.
|
| 74 |
+
Thanks https://huggingface.co/p1atdev/ for the conversion script from ckpt to safetensors pruned & fp16
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
### Models can't be sold, merged, or distributed without prior written agreement.
|
| 78 |
+
|
| 79 |
+
","{""id"": ""thibaud/controlnet-sd21"", ""author"": ""thibaud"", ""sha"": ""ba159a8ce47185aa1821db7fe10137cf2f5e04dd"", ""last_modified"": ""2023-08-14 07:43:07+00:00"", ""created_at"": ""2023-03-06 15:24:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12543, ""downloads_all_time"": null, ""likes"": 401, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""art"", ""stable diffusion"", ""controlnet"", ""en"", ""dataset:laion/laion-art"", ""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets: laion/laion-art\nlanguage:\n- en\nlicense: other\ntags:\n- art\n- diffusers\n- stable diffusion\n- controlnet"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_ade20k.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_ade20k.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_canny.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_canny.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_canny.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_color.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_color.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_depth.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_depth.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_depth.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_hed.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_hed.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_hed.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_lineart.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_lineart.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_normalbae.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_normalbae.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_openpose.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_openpose.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_openpose.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_openposev2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_openposev2.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_scribble.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_scribble.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_scribble.yaml', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='control_v11p_sd21_zoedepth.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v11p_sd21_zoedepth.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_ade20k.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_canny.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_color.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_depth.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_hed.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_lineart.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_normalbae.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_openpose.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_openposev2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_scribble.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='example_zoedepth.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [""chatbot4all/stabletest"", ""sachinkidzure/PowerPaint"", ""harshkidzure/PowerPaint""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-14 07:43:07+00:00"", ""cardData"": ""datasets: laion/laion-art\nlanguage:\n- en\nlicense: other\ntags:\n- art\n- diffusers\n- stable diffusion\n- controlnet"", ""transformersInfo"": null, ""_id"": ""6406059431ddcb90bdabb0df"", ""modelId"": ""thibaud/controlnet-sd21"", ""usedStorage"": 55944319096}",0,,0,,0,,0,,0,"chatbot4all/stabletest, harshkidzure/PowerPaint, huggingface/InferenceSupport/discussions/new?title=thibaud/controlnet-sd21&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bthibaud%2Fcontrolnet-sd21%5D(%2Fthibaud%2Fcontrolnet-sd21)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sachinkidzure/PowerPaint",4
|
distil-large-v3_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
distilgpt2_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
fashion-clip_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
@@ -0,0 +1,207 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
patrickjohncyh/fashion-clip,"---
|
| 3 |
+
license: mit
|
| 4 |
+
tags:
|
| 5 |
+
- vision
|
| 6 |
+
- language
|
| 7 |
+
- fashion
|
| 8 |
+
- ecommerce
|
| 9 |
+
library_name: transformers
|
| 10 |
+
language:
|
| 11 |
+
- en
|
| 12 |
+
widget:
|
| 13 |
+
- src: https://cdn-images.farfetch-contents.com/19/76/05/56/19760556_44221665_1000.jpg
|
| 14 |
+
candidate_labels: black shoe, red shoe, a cat
|
| 15 |
+
example_title: Black Shoe
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
[](https://www.youtube.com/watch?v=uqRSc-KSA1Y) [](https://huggingface.co/patrickjohncyh/fashion-clip) [](https://colab.research.google.com/drive/1Z1hAxBnWjF76bEi9KQ6CMBBEmI_FVDrW?usp=sharing) [](https://towardsdatascience.com/teaching-clip-some-fashion-3005ac3fdcc3) [](https://huggingface.co/spaces/vinid/fashion-clip-app)
|
| 19 |
+
|
| 20 |
+
# Model Card: Fashion CLIP
|
| 21 |
+
|
| 22 |
+
Disclaimer: The model card adapts the model card from [here](https://huggingface.co/openai/clip-vit-base-patch32).
|
| 23 |
+
|
| 24 |
+
## Model Details
|
| 25 |
+
|
| 26 |
+
UPDATE (10/03/23): We have updated the model! We found that the [laion/CLIP-ViT-B-32-laion2B-s34B-b79K](https://huggingface.co/laion/CLIP-ViT-B-32-laion2B-s34B-b79K) checkpoint (thanks [Bin](https://www.linkedin.com/in/bin-duan-56205310/)!) worked better than original OpenAI CLIP on Fashion. We thus fine-tune a newer (and better!) version of FashionCLIP (henceforth FashionCLIP 2.0), while keeping the architecture the same. We postulate that the performance gains afforded by `laion/CLIP-ViT-B-32-laion2B-s34B-b79K` are due to the increased training data (5x OpenAI CLIP data). Our [thesis](https://www.nature.com/articles/s41598-022-23052-9), however, remains the same -- fine-tuning `laion/CLIP` on our fashion dataset improved zero-shot performance across our benchmarks. See the table below comparing weighted macro F1 score across models.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
| Model | FMNIST | KAGL | DEEP |
|
| 30 |
+
| ------------- | ------------- | ------------- | ------------- |
|
| 31 |
+
| OpenAI CLIP | 0.66 | 0.63 | 0.45 |
|
| 32 |
+
| FashionCLIP | 0.74 | 0.67 | 0.48 |
|
| 33 |
+
| Laion CLIP | 0.78 | 0.71 | 0.58 |
|
| 34 |
+
| FashionCLIP 2.0 | __0.83__ | __0.73__ | __0.62__ |
|
| 35 |
+
|
| 36 |
+
---
|
| 37 |
+
|
| 38 |
+
FashionCLIP is a CLIP-based model developed to produce general product representations for fashion concepts. Leveraging the pre-trained checkpoint (ViT-B/32) released by [OpenAI](https://github.com/openai/CLIP), we train FashionCLIP on a large, high-quality novel fashion dataset to study whether domain-specific fine-tuning of CLIP-like models is sufficient to produce product representations that are zero-shot transferable to entirely new datasets and tasks. FashionCLIP was not developed for model deployment - to do so, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within.
|
| 39 |
+
|
| 40 |
+
### Model Date
|
| 41 |
+
|
| 42 |
+
March 2023
|
| 43 |
+
|
| 44 |
+
### Model Type
|
| 45 |
+
|
| 46 |
+
The model uses a ViT-B/32 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained, starting from a pre-trained checkpoint, to maximize the similarity of (image, text) pairs via a contrastive loss on a fashion dataset containing 800K products.
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
### Documents
|
| 50 |
+
|
| 51 |
+
- [FashionCLIP Github Repo](https://github.com/patrickjohncyh/fashion-clip)
|
| 52 |
+
- [FashionCLIP Paper](https://www.nature.com/articles/s41598-022-23052-9)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
## Data
|
| 56 |
+
|
| 57 |
+
The model was trained on (image, text) pairs obtained from the Farfetch dataset[^1 Awaiting official release.], an English dataset comprising over 800K fashion products, with more than 3K brands across dozens of object types. The image used for encoding is the standard product image, which is a picture of the item over a white background, with no humans. The text used is a concatenation of the _highlight_ (e.g., “stripes”, “long sleeves”, “Armani”) and _short description_ (“80s styled t-shirt”) available in the Farfetch dataset.
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
## Limitations, Bias and Fairness
|
| 62 |
+
|
| 63 |
+
We acknowledge certain limitations of FashionCLIP and expect that it inherits certain limitations and biases present in the original CLIP model. We do not expect our fine-tuning to significantly augment these limitations: we acknowledge that the fashion data we use makes explicit assumptions about the notion of gender as in ""blue shoes for a woman"" that inevitably associate aspects of clothing with specific people.
|
| 64 |
+
|
| 65 |
+
Our investigations also suggest that the data used introduces certain limitations in FashionCLIP. From the textual modality, given that most captions derived from the Farfetch dataset are long, we observe that FashionCLIP may be more performant in longer queries than shorter ones. From the image modality, FashionCLIP is also biased towards standard product images (centered, white background).
|
| 66 |
+
|
| 67 |
+
Model selection, i.e. selecting an appropriate stopping criterion during fine-tuning, remains an open challenge. We observed that using loss on an in-domain (i.e. same distribution as test) validation dataset is a poor selection criterion when out-of-domain generalization (i.e. across different datasets) is desired, even when the dataset used is relatively diverse and large.
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
## Citation
|
| 71 |
+
```
|
| 72 |
+
@Article{Chia2022,
|
| 73 |
+
title=""Contrastive language and vision learning of general fashion concepts"",
|
| 74 |
+
author=""Chia, Patrick John
|
| 75 |
+
and Attanasio, Giuseppe
|
| 76 |
+
and Bianchi, Federico
|
| 77 |
+
and Terragni, Silvia
|
| 78 |
+
and Magalh{\~a}es, Ana Rita
|
| 79 |
+
and Goncalves, Diogo
|
| 80 |
+
and Greco, Ciro
|
| 81 |
+
and Tagliabue, Jacopo"",
|
| 82 |
+
journal=""Scientific Reports"",
|
| 83 |
+
year=""2022"",
|
| 84 |
+
month=""Nov"",
|
| 85 |
+
day=""08"",
|
| 86 |
+
volume=""12"",
|
| 87 |
+
number=""1"",
|
| 88 |
+
abstract=""The steady rise of online shopping goes hand in hand with the development of increasingly complex ML and NLP models. While most use cases are cast as specialized supervised learning problems, we argue that practitioners would greatly benefit from general and transferable representations of products. In this work, we build on recent developments in contrastive learning to train FashionCLIP, a CLIP-like model adapted for the fashion industry. We demonstrate the effectiveness of the representations learned by FashionCLIP with extensive tests across a variety of tasks, datasets and generalization probes. We argue that adaptations of large pre-trained models such as CLIP offer new perspectives in terms of scalability and sustainability for certain types of players in the industry. Finally, we detail the costs and environmental impact of training, and release the model weights and code as open source contribution to the community."",
|
| 89 |
+
issn=""2045-2322"",
|
| 90 |
+
doi=""10.1038/s41598-022-23052-9"",
|
| 91 |
+
url=""https://doi.org/10.1038/s41598-022-23052-9""
|
| 92 |
+
}
|
| 93 |
+
```","{""id"": ""patrickjohncyh/fashion-clip"", ""author"": ""patrickjohncyh"", ""sha"": ""7e3ba62ce16b379a1ab479346b66f192e76f51b7"", ""last_modified"": ""2024-09-17 15:19:43+00:00"", ""created_at"": ""2023-02-21 19:51:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3716368, ""downloads_all_time"": null, ""likes"": 222, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""onnx"", ""safetensors"", ""clip"", ""zero-shot-image-classification"", ""vision"", ""language"", ""fashion"", ""ecommerce"", ""en"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""zero-shot-image-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- vision\n- language\n- fashion\n- ecommerce\nwidget:\n- src: https://cdn-images.farfetch-contents.com/19/76/05/56/19760556_44221665_1000.jpg\n candidate_labels: black shoe, red shoe, a cat\n example_title: Black Shoe"", ""widget_data"": [{""src"": ""https://cdn-images.farfetch-contents.com/19/76/05/56/19760556_44221665_1000.jpg"", ""candidate_labels"": ""black shoe, red shoe, a cat"", ""example_title"": ""Black Shoe""}], ""model_index"": null, ""config"": {""architectures"": [""CLIPModel""], ""model_type"": ""clip"", ""tokenizer_config"": {""unk_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""bos_token"": {""content"": ""<|startoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""eos_token"": {""content"": ""<|endoftext|>"", ""single_word"": false, ""lstrip"": false, ""rstrip"": false, ""normalized"": true, ""__type"": ""AddedToken""}, ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/model.onnx', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='onnx/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""vinid/fashion-clip-app"", ""user-agent/testing-full-flow"", ""jasonwu92/image-search-playground"", ""thelabel/ai-product-data"", ""rfmantoan/search-demo"", ""romadanskiy/open-source-models-hg"", ""FrezzyI/Fashion-Clip_App"", ""Simon-Pierre/patrickjohncyh-fashion-clip"", ""dwarfplanet/patrickjohncyh-fashion-clips"", ""SulimanGorsi/patrickjohncyh-fashion-clip"", ""CodeGitte/FashionClip"", ""pratyush19/Temp"", ""MinderaLabs/NewLook"", ""moPharma1/computer-vision-backend"", ""Saad0KH/fashion-clip"", ""Ahalya002/patrickjohncyh-fashion-clip"", ""user-agent/zero-shot-image-classification"", ""jayden1000/fashion-clip-app"", ""Daemon966/patrickjohncyh-fashion-clip"", ""DINGOLANI/Autocomplete-luxury-fashion"", ""ibne-osama/product-recsys"", ""TheScepter/Savvy"", ""Nipunnn/Easel_AI_Engineering""], ""safetensors"": {""parameters"": {""I64"": 127, ""F32"": 151277312}, ""total"": 151277439}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-17 15:19:43+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: mit\ntags:\n- vision\n- language\n- fashion\n- ecommerce\nwidget:\n- src: https://cdn-images.farfetch-contents.com/19/76/05/56/19760556_44221665_1000.jpg\n candidate_labels: black shoe, red shoe, a cat\n example_title: Black Shoe"", ""transformersInfo"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""_id"": ""63f520d3cc1dd3168692e256"", ""modelId"": ""patrickjohncyh/fashion-clip"", ""usedStorage"": 3026665974}",0,"https://huggingface.co/justin-shopcapsule/screenshot-fashion-clip-finetuned, https://huggingface.co/justin-shopcapsule/screenshot-fashion-clip-finetuned-v2-t1",2,,0,,0,,0,"CodeGitte/FashionClip, FrezzyI/Fashion-Clip_App, Nipunnn/Easel_AI_Engineering, Simon-Pierre/patrickjohncyh-fashion-clip, SulimanGorsi/patrickjohncyh-fashion-clip, dwarfplanet/patrickjohncyh-fashion-clips, huggingface/InferenceSupport/discussions/new?title=patrickjohncyh/fashion-clip&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpatrickjohncyh%2Ffashion-clip%5D(%2Fpatrickjohncyh%2Ffashion-clip)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jasonwu92/image-search-playground, rfmantoan/search-demo, romadanskiy/open-source-models-hg, thelabel/ai-product-data, user-agent/testing-full-flow, vinid/fashion-clip-app",13
|
| 94 |
+
justin-shopcapsule/screenshot-fashion-clip-finetuned,"---
|
| 95 |
+
license: mit
|
| 96 |
+
base_model: patrickjohncyh/fashion-clip
|
| 97 |
+
tags:
|
| 98 |
+
- generated_from_trainer
|
| 99 |
+
datasets:
|
| 100 |
+
- cleaned_csv_local.csv
|
| 101 |
+
model-index:
|
| 102 |
+
- name: screenshot-fashion-clip-finetuned
|
| 103 |
+
results: []
|
| 104 |
+
---
|
| 105 |
+
|
| 106 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 107 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 108 |
+
|
| 109 |
+
# screenshot-fashion-clip-finetuned
|
| 110 |
+
|
| 111 |
+
This model is a fine-tuned version of [patrickjohncyh/fashion-clip](https://huggingface.co/patrickjohncyh/fashion-clip) on the cleaned_csv_local.csv 2023 dataset.
|
| 112 |
+
It achieves the following results on the evaluation set:
|
| 113 |
+
- Loss: 1.0196
|
| 114 |
+
|
| 115 |
+
## Model description
|
| 116 |
+
|
| 117 |
+
More information needed
|
| 118 |
+
|
| 119 |
+
## Intended uses & limitations
|
| 120 |
+
|
| 121 |
+
More information needed
|
| 122 |
+
|
| 123 |
+
## Training and evaluation data
|
| 124 |
+
|
| 125 |
+
More information needed
|
| 126 |
+
|
| 127 |
+
## Training procedure
|
| 128 |
+
|
| 129 |
+
### Training hyperparameters
|
| 130 |
+
|
| 131 |
+
The following hyperparameters were used during training:
|
| 132 |
+
- learning_rate: 5e-05
|
| 133 |
+
- train_batch_size: 32
|
| 134 |
+
- eval_batch_size: 32
|
| 135 |
+
- seed: 42
|
| 136 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 137 |
+
- lr_scheduler_type: linear
|
| 138 |
+
- num_epochs: 3.0
|
| 139 |
+
|
| 140 |
+
### Training results
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
### Framework versions
|
| 145 |
+
|
| 146 |
+
- Transformers 4.36.0.dev0
|
| 147 |
+
- Pytorch 2.1.1+cu118
|
| 148 |
+
- Datasets 2.15.0
|
| 149 |
+
- Tokenizers 0.15.0
|
| 150 |
+
","{""id"": ""justin-shopcapsule/screenshot-fashion-clip-finetuned"", ""author"": ""justin-shopcapsule"", ""sha"": ""bb813337f872863ca1ef591b5f51b32066068ee9"", ""last_modified"": ""2023-12-06 18:14:18+00:00"", ""created_at"": ""2023-12-06 17:43:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""clip"", ""zero-shot-image-classification"", ""generated_from_trainer"", ""dataset:cleaned_csv_local.csv"", ""base_model:patrickjohncyh/fashion-clip"", ""base_model:finetune:patrickjohncyh/fashion-clip"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""zero-shot-image-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: patrickjohncyh/fashion-clip\ndatasets:\n- cleaned_csv_local.csv\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: screenshot-fashion-clip-finetuned\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""screenshot-fashion-clip-finetuned"", ""results"": []}], ""config"": {""architectures"": [""CLIPModel""], ""model_type"": ""clip"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 151277312}, ""total"": 151277312}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-06 18:14:18+00:00"", ""cardData"": ""base_model: patrickjohncyh/fashion-clip\ndatasets:\n- cleaned_csv_local.csv\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: screenshot-fashion-clip-finetuned\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""_id"": 
""6570b2bd4f98a7a5a3bc046c"", ""modelId"": ""justin-shopcapsule/screenshot-fashion-clip-finetuned"", ""usedStorage"": 1210318080}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=justin-shopcapsule/screenshot-fashion-clip-finetuned&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjustin-shopcapsule%2Fscreenshot-fashion-clip-finetuned%5D(%2Fjustin-shopcapsule%2Fscreenshot-fashion-clip-finetuned)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 151 |
+
justin-shopcapsule/screenshot-fashion-clip-finetuned-v2-t1,"---
|
| 152 |
+
license: mit
|
| 153 |
+
base_model: patrickjohncyh/fashion-clip
|
| 154 |
+
tags:
|
| 155 |
+
- generated_from_trainer
|
| 156 |
+
datasets:
|
| 157 |
+
- cleaned_csv_local.csv
|
| 158 |
+
model-index:
|
| 159 |
+
- name: screenshot-fashion-clip-finetuned-v2-t1
|
| 160 |
+
results: []
|
| 161 |
+
---
|
| 162 |
+
|
| 163 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 164 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 165 |
+
|
| 166 |
+
# screenshot-fashion-clip-finetuned-v2-t1
|
| 167 |
+
|
| 168 |
+
This model is a fine-tuned version of [patrickjohncyh/fashion-clip](https://huggingface.co/patrickjohncyh/fashion-clip) on the cleaned_csv_local.csv 2023 dataset.
|
| 169 |
+
It achieves the following results on the evaluation set:
|
| 170 |
+
- Loss: 2.1175
|
| 171 |
+
|
| 172 |
+
## Model description
|
| 173 |
+
|
| 174 |
+
More information needed
|
| 175 |
+
|
| 176 |
+
## Intended uses & limitations
|
| 177 |
+
|
| 178 |
+
More information needed
|
| 179 |
+
|
| 180 |
+
## Training and evaluation data
|
| 181 |
+
|
| 182 |
+
More information needed
|
| 183 |
+
|
| 184 |
+
## Training procedure
|
| 185 |
+
|
| 186 |
+
### Training hyperparameters
|
| 187 |
+
|
| 188 |
+
The following hyperparameters were used during training:
|
| 189 |
+
- learning_rate: 5e-05
|
| 190 |
+
- train_batch_size: 32
|
| 191 |
+
- eval_batch_size: 32
|
| 192 |
+
- seed: 42
|
| 193 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 194 |
+
- lr_scheduler_type: linear
|
| 195 |
+
- num_epochs: 3.0
|
| 196 |
+
|
| 197 |
+
### Training results
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
### Framework versions
|
| 202 |
+
|
| 203 |
+
- Transformers 4.36.0.dev0
|
| 204 |
+
- Pytorch 2.1.1+cu118
|
| 205 |
+
- Datasets 2.15.0
|
| 206 |
+
- Tokenizers 0.15.0
|
| 207 |
+
","{""id"": ""justin-shopcapsule/screenshot-fashion-clip-finetuned-v2-t1"", ""author"": ""justin-shopcapsule"", ""sha"": ""6de644f8c675da2fe0e4a0edc0e3bb9b20583b6c"", ""last_modified"": ""2024-09-25 17:50:34+00:00"", ""created_at"": ""2023-12-07 16:00:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 18, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""clip"", ""zero-shot-image-classification"", ""generated_from_trainer"", ""dataset:cleaned_csv_local.csv"", ""base_model:patrickjohncyh/fashion-clip"", ""base_model:finetune:patrickjohncyh/fashion-clip"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""zero-shot-image-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: patrickjohncyh/fashion-clip\ndatasets:\n- cleaned_csv_local.csv\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: screenshot-fashion-clip-finetuned-v2-t1\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""screenshot-fashion-clip-finetuned-v2-t1"", ""results"": []}], ""config"": {""architectures"": [""CLIPModel""], ""model_type"": ""clip"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='eval_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-25 17:50:34+00:00"", ""cardData"": ""base_model: patrickjohncyh/fashion-clip\ndatasets:\n- cleaned_csv_local.csv\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: screenshot-fashion-clip-finetuned-v2-t1\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForZeroShotImageClassification"", ""custom_class"": null, ""pipeline_tag"": ""zero-shot-image-classification"", ""processor"": ""AutoProcessor""}, ""_id"": ""6571ec20c3442fe850f05ffb"", ""modelId"": 
""justin-shopcapsule/screenshot-fashion-clip-finetuned-v2-t1"", ""usedStorage"": 3026016082}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=justin-shopcapsule/screenshot-fashion-clip-finetuned-v2-t1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bjustin-shopcapsule%2Fscreenshot-fashion-clip-finetuned-v2-t1%5D(%2Fjustin-shopcapsule%2Fscreenshot-fashion-clip-finetuned-v2-t1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
flux-controlnet-collections_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
@@ -0,0 +1,86 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
XLabs-AI/flux-controlnet-collections,"---
|
| 3 |
+
license: other
|
| 4 |
+
license_name: flux-1-dev-non-commercial-license
|
| 5 |
+
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
pipeline_tag: text-to-image
|
| 9 |
+
tags:
|
| 10 |
+
- Stable Diffusion
|
| 11 |
+
- image-generation
|
| 12 |
+
- Flux
|
| 13 |
+
- diffusers
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
[<img src=""https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/light/join-our-discord-rev1.png?raw=true"">](https://discord.gg/FHY2guThfy)
|
| 18 |
+
|
| 19 |
+
This repository provides a collection of ControlNet checkpoints for
|
| 20 |
+
[FLUX.1-dev model](https://huggingface.co/black-forest-labs/FLUX.1-dev) by Black Forest Labs
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
[See our github](https://github.com/XLabs-AI/x-flux-comfyui) for ComfyUI workflows.
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
[See our github](https://github.com/XLabs-AI/x-flux) for train script, train configs and demo script for inference.
|
| 28 |
+
|
| 29 |
+
# Models
|
| 30 |
+
|
| 31 |
+
Our collection supports 3 models:
|
| 32 |
+
- Canny
|
| 33 |
+
- HED
|
| 34 |
+
- Depth (Midas)
|
| 35 |
+
|
| 36 |
+
Each ControlNet is trained on 1024x1024 resolution and works for 1024x1024 resolution.
|
| 37 |
+
We release **v3 versions** - better and more realistic versions, which can be used directly in ComfyUI!
|
| 38 |
+
|
| 39 |
+
Please, see our [ComfyUI custom nodes installation guide](https://github.com/XLabs-AI/x-flux-comfyui)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Examples
|
| 43 |
+
|
| 44 |
+
See examples of our models results below.
|
| 45 |
+
Also, some generation results with input images are provided in ""Files and versions""
|
| 46 |
+
|
| 47 |
+
# Inference
|
| 48 |
+
|
| 49 |
+
To try our models, you have 3 options:
|
| 50 |
+
1. Use main.py from our [official repo](https://github.com/XLabs-AI/x-flux)
|
| 51 |
+
2. Use our custom nodes for ComfyUI and test it with provided workflows (check out folder /workflows)
|
| 52 |
+
3. Use gradio demo
|
| 53 |
+
|
| 54 |
+
See examples how to launch our models:
|
| 55 |
+
|
| 56 |
+
## Canny ControlNet (version 3)
|
| 57 |
+
|
| 58 |
+
1. Clone our [x-flux-comfyui](https://github.com/XLabs-AI/x-flux-comfyui) custom nodes
|
| 59 |
+
2. Launch ComfyUI
|
| 60 |
+
3. Try our canny_workflow.json
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+

|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
## Depth ControlNet (version 3)
|
| 67 |
+
|
| 68 |
+
1. Clone our [x-flux-comfyui](https://github.com/XLabs-AI/x-flux-comfyui) custom nodes
|
| 69 |
+
2. Launch ComfyUI
|
| 70 |
+
3. Try our depth_workflow.json
|
| 71 |
+
|
| 72 |
+

|
| 73 |
+

|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
## HED ControlNet (version 3)
|
| 77 |
+
|
| 78 |
+
1. Clone our [x-flux-comfyui](https://github.com/XLabs-AI/x-flux-comfyui) custom nodes
|
| 79 |
+
2. Launch ComfyUI
|
| 80 |
+
3. Try our hed_workflow.json
|
| 81 |
+
|
| 82 |
+

|
| 83 |
+
|
| 84 |
+
## License
|
| 85 |
+
|
| 86 |
+
Our weights fall under the [FLUX.1 [dev]](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) Non-Commercial License<br/>","{""id"": ""XLabs-AI/flux-controlnet-collections"", ""author"": ""XLabs-AI"", ""sha"": ""86ab1e915a389d5857135c00e0d350e9e38a9048"", ""last_modified"": ""2024-08-30 12:29:35+00:00"", ""created_at"": ""2024-08-13 00:09:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 23306, ""downloads_all_time"": null, ""likes"": 482, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""Stable Diffusion"", ""image-generation"", ""Flux"", ""text-to-image"", ""en"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.\npipeline_tag: text-to-image\ntags:\n- Stable Diffusion\n- image-generation\n- Flux\n- diffusers"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/canny_v2_res1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/canny_v2_res2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/canny_v2_res3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_result_1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_result_2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_result_3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_result_4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_v2_res1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/depth_v2_res2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/hed_result_1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/hed_result_2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/hed_result_3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/hed_result_4.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_canny.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_depth1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_depth2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_depth3.webp', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_hed1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/input_image_hed2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-canny-controlnet-v3.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-canny-controlnet.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-canny-controlnet_v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-depth-controlnet-v3.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='flux-depth-controlnet.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-depth-controlnet_v2.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-hed-controlnet-v3.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux-hed-controlnet.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='workflows/canny_workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='workflows/depth_workflow.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='workflows/example.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='workflows/hed_workflow.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""fotographerai/Zen-Style-Shape"", ""guardiancc/flux-advanced-explorer"", ""pathguide/XLabs-AI-flux-controlnet-collections"", ""TetoBer/XLabs-AI-flux-controlnet-collections"", ""TheDiplo/XLabs-AI-flux-controlnet-collections"", ""jabronie23/XLabs-AI-flux-controlnet-collections"", ""huggingxfred/XLabs-AI-flux-controlnet-collections"", ""Metplus/XLabs-AI-flux-controlnet-collections""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-30 12:29:35+00:00"", ""cardData"": ""language:\n- en\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.\npipeline_tag: text-to-image\ntags:\n- Stable Diffusion\n- image-generation\n- Flux\n- diffusers"", ""transformersInfo"": null, ""_id"": ""66baa4244de574a3295c9f0a"", ""modelId"": ""XLabs-AI/flux-controlnet-collections"", ""usedStorage"": 11922020867}",0,,0,,0,,0,,0,"Metplus/XLabs-AI-flux-controlnet-collections, TetoBer/XLabs-AI-flux-controlnet-collections, TheDiplo/XLabs-AI-flux-controlnet-collections, fotographerai/Zen-Style-Shape, guardiancc/flux-advanced-explorer, huggingface/InferenceSupport/discussions/new?title=XLabs-AI/flux-controlnet-collections&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BXLabs-AI%2Fflux-controlnet-collections%5D(%2FXLabs-AI%2Fflux-controlnet-collections)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, huggingxfred/XLabs-AI-flux-controlnet-collections, jabronie23/XLabs-AI-flux-controlnet-collections, pathguide/XLabs-AI-flux-controlnet-collections",9
|
gte-multilingual-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
idefics2-8b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
ip-composition-adapter_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,71 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
ostris/ip-composition-adapter,"---
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
pipeline_tag: text-to-image
|
| 5 |
+
tags:
|
| 6 |
+
- stable diffusion
|
| 7 |
+
- ip adapter
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# IP Composition Adapter
|
| 11 |
+
|
| 12 |
+
This adapter for Stable Diffusion 1.5 and SDXL is designed to inject the general composition of an image into the model while mostly ignoring the style and content. For example, a portrait of a person waving their left hand will result in an image of a completely different person waving their left hand.
|
| 13 |
+
|
| 14 |
+
### Follow Me
|
| 15 |
+
I do a lot of experiments and other things. To keep up to date, follow me on [Twitter](https://twitter.com/ostrisai).
|
| 16 |
+
|
| 17 |
+
### Thanks
|
| 18 |
+
|
| 19 |
+
I want to give a special thanks to [POM](https://huggingface.co/peteromallet) with [BANODOCO](https://huggingface.co/BANODOCO). This was their idea, I just trained it. Full credit goes to them.
|
| 20 |
+
|
| 21 |
+
## Usage
|
| 22 |
+
|
| 23 |
+
Use just like other IP+ adapters from [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter). For both SD1.5 and SDXL variants, use the CLIP vision encoder ([CLIP-H](https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder))
|
| 24 |
+
|
| 25 |
+
You may need to lower the CFG to around 3 for best results, especially on the SDXL variant.
|
| 26 |
+
|
| 27 |
+
### How is it different from control nets?
|
| 28 |
+
|
| 29 |
+
Control nets are more rigid. A control net will spatially align an image to nearly perfectly match the control image. The composition adapter allows the control to be more flexible.
|
| 30 |
+
|
| 31 |
+
## SDXL Examples
|
| 32 |
+
|
| 33 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20195340.png"" alt=""1"" width=""100%""/>
|
| 34 |
+
|
| 35 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20194130.png"" alt=""1"" width=""100%""/>
|
| 36 |
+
|
| 37 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20193948.png"" alt=""1"" width=""100%""/>
|
| 38 |
+
|
| 39 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20193802.png"" alt=""1"" width=""100%""/>
|
| 40 |
+
|
| 41 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20193632.png"" alt=""1"" width=""100%""/>
|
| 42 |
+
|
| 43 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20192659.png"" alt=""1"" width=""100%""/>
|
| 44 |
+
|
| 45 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%202024-03-19%20192445.png"" alt=""1"" width=""100%""/>
|
| 46 |
+
|
| 47 |
+
## SD 1.5 Examples
|
| 48 |
+
|
| 49 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2013-06-32.jpg"" alt=""1"" width=""100%""/>
|
| 50 |
+
|
| 51 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2013-09-57.jpg"" alt=""2"" width=""100%""/>
|
| 52 |
+
|
| 53 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2013-11-27.jpg"" alt=""3"" width=""100%""/>
|
| 54 |
+
|
| 55 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2013-13-19.jpg"" alt=""4"" width=""100%""/>
|
| 56 |
+
|
| 57 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2013-56-51.jpg"" alt=""5"" width=""100%""/>
|
| 58 |
+
|
| 59 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-00-31.jpg"" alt=""6"" width=""100%""/>
|
| 60 |
+
|
| 61 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-04-41.jpg"" alt=""7"" width=""100%""/>
|
| 62 |
+
|
| 63 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-09-31.jpg"" alt=""8"" width=""100%""/>
|
| 64 |
+
|
| 65 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-11-10.jpg"" alt=""9"" width=""100%""/>
|
| 66 |
+
|
| 67 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-13-26.jpg"" alt=""10"" width=""100%""/>
|
| 68 |
+
|
| 69 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-19-20.jpg"" alt=""11"" width=""100%""/>
|
| 70 |
+
|
| 71 |
+
<img src=""https://huggingface.co/ostris/ip-composition-adapter/resolve/main/resources/Screenshot%20from%202024-03-16%2014-21-50.jpg?download=true"" alt=""12"" width=""100%""/>","{""id"": ""ostris/ip-composition-adapter"", ""author"": ""ostris"", ""sha"": ""0d2ed55c441a20c20e09da4dc086097703f26b61"", ""last_modified"": ""2024-03-20 02:06:44+00:00"", ""created_at"": ""2024-03-16 20:37:57+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 175, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""stable diffusion"", ""ip adapter"", ""text-to-image"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0\npipeline_tag: text-to-image\ntags:\n- stable diffusion\n- ip adapter"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ip_plus_composition_sd15.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ip_plus_composition_sdxl.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/.gitkeep', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 192445.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 192659.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 193632.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 193802.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 193948.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 194130.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot 2024-03-19 195340.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 13-06-32.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 13-09-57.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 13-11-27.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 13-13-19.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 13-56-51.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-00-31.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-04-41.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-09-31.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-11-10.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-13-26.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 2024-03-16 14-19-20.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='resources/Screenshot from 
2024-03-16 14-21-50.jpg', size=None, blob_id=None, lfs=None)""], ""spaces"": [""radames/Real-Time-Latent-Consistency-Model"", ""qyoo/AID-v2"", ""eMILF2/real-time-model""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-20 02:06:44+00:00"", ""cardData"": ""license: apache-2.0\npipeline_tag: text-to-image\ntags:\n- stable diffusion\n- ip adapter"", ""transformersInfo"": null, ""_id"": ""65f60325b76ab963c3b21058"", ""modelId"": ""ostris/ip-composition-adapter"", ""usedStorage"": 954398708}",0,,0,,0,,0,,0,"eMILF2/real-time-model, huggingface/InferenceSupport/discussions/new?title=ostris/ip-composition-adapter&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bostris%2Fip-composition-adapter%5D(%2Fostris%2Fip-composition-adapter)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, qyoo/AID-v2, radames/Real-Time-Latent-Consistency-Model",4
|
jina-clip-v2_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
llama-2-ko-7b_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv
ADDED
|
@@ -0,0 +1,640 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
beomi/llama-2-ko-7b,"---
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
- ko
|
| 6 |
+
pipeline_tag: text-generation
|
| 7 |
+
inference: false
|
| 8 |
+
tags:
|
| 9 |
+
- facebook
|
| 10 |
+
- meta
|
| 11 |
+
- pytorch
|
| 12 |
+
- llama
|
| 13 |
+
- llama-2
|
| 14 |
+
- kollama
|
| 15 |
+
- llama-2-ko
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
**Update Log**
|
| 19 |
+
|
| 20 |
+
- 2023.12.27
|
| 21 |
+
- New Model is here! Trained with only open-accessible Korean text corpus: https://huggingface.co/beomi/open-llama-2-ko-7b
|
| 22 |
+
- 2023.10.19
|
| 23 |
+
- Fix tokenizer bug (space not applied when decoding) after `transformers>=4.34.0`
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# **Llama-2-Ko** 🦙🇰🇷
|
| 27 |
+
|
| 28 |
+
Llama-2-Ko serves as an advanced iteration of Llama 2, benefiting from an expanded vocabulary and the inclusion of a Korean corpus in its further pretraining. Just like its predecessor, Llama-2-Ko operates within the broad range of generative text models that stretch from 7 billion to 70 billion parameters. This repository focuses on the 7B pretrained version, which is tailored to fit the Hugging Face Transformers format. For access to the other models, feel free to consult the index provided below.
|
| 29 |
+
|
| 30 |
+
## Model Details
|
| 31 |
+
|
| 32 |
+
**Model Developers** Junbum Lee (Beomi)
|
| 33 |
+
|
| 34 |
+
**Variations** Llama-2-Ko will come in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.
|
| 35 |
+
|
| 36 |
+
**Input** Models input text only.
|
| 37 |
+
|
| 38 |
+
**Output** Models generate text only.
|
| 39 |
+
|
| 40 |
+
**Model Architecture**
|
| 41 |
+
|
| 42 |
+
Llama-2-Ko is an auto-regressive language model that uses an optimized transformer architecture based on Llama-2.
|
| 43 |
+
|
| 44 |
+
||Training Data|Params|Content Length|GQA|Tokens|LR|
|
| 45 |
+
|---|---|---|---|---|---|---|
|
| 46 |
+
|Llama 2|*A new mix of Korean online data*|7B|4k|✗|>40B*|1e<sup>-5</sup>|
|
| 47 |
+
*Plan to train up to 200B tokens
|
| 48 |
+
|
| 49 |
+
**Vocab Expansion**
|
| 50 |
+
|
| 51 |
+
| Model Name | Vocabulary Size | Description |
|
| 52 |
+
| --- | --- | --- |
|
| 53 |
+
| Original Llama-2 | 32000 | Sentencepiece BPE |
|
| 54 |
+
| **Expanded Llama-2-Ko** | 46336 | Sentencepiece BPE. Added Korean vocab and merges |
|
| 55 |
+
|
| 56 |
+
**Tokenizing ""안녕하세요, 오늘은 날씨가 좋네요.""**
|
| 57 |
+
|
| 58 |
+
| Model | Tokens |
|
| 59 |
+
| --- | --- |
|
| 60 |
+
| Llama-2 | `['▁', '안', '<0xEB>', '<0x85>', '<0x95>', '하', '세', '요', ',', '▁', '오', '<0xEB>', '<0x8A>', '<0x98>', '은', '▁', '<0xEB>', '<0x82>', '<0xA0>', '씨', '가', '▁', '<0xEC>', '<0xA2>', '<0x8B>', '<0xEB>', '<0x84>', '<0xA4>', '요']` |
|
| 61 |
+
| Llama-2-Ko | `['▁안녕', '하세요', ',', '▁오늘은', '▁날', '씨가', '▁좋네요']` |
|
| 62 |
+
|
| 63 |
+
**Tokenizing ""Llama 2: Open Foundation and Fine-Tuned Chat Models""**
|
| 64 |
+
|
| 65 |
+
| Model | Tokens |
|
| 66 |
+
| --- | --- |
|
| 67 |
+
| Llama-2 | `['▁L', 'l', 'ama', '▁', '2', ':', '▁Open', '▁Foundation', '▁and', '▁Fine', '-', 'T', 'un', 'ed', '▁Ch', 'at', '▁Mod', 'els']` |
|
| 68 |
+
| Llama-2-Ko | `['▁L', 'l', 'ama', '▁', '2', ':', '▁Open', '▁Foundation', '▁and', '▁Fine', '-', 'T', 'un', 'ed', '▁Ch', 'at', '▁Mod', 'els']` |
|
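The vocabulary expansion summarized in the two tables above can be checked with a short script. The following is a minimal sketch, assuming `transformers` is installed and that you have access to the gated `meta-llama/Llama-2-7b-hf` repository; the expected vocabulary sizes are taken from the table in this card.

```python
from transformers import AutoTokenizer

# Compare the original Llama-2 tokenizer with the expanded Llama-2-Ko tokenizer.
base = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
ko = AutoTokenizer.from_pretrained("beomi/llama-2-ko-7b", use_fast=True)

print(len(base), len(ko))  # expected: 32000 vs 46336 vocabulary entries

for text in ["안녕하세요, 오늘은 날씨가 좋네요.",
             "Llama 2: Open Foundation and Fine-Tuned Chat Models"]:
    print("Llama-2   :", base.tokenize(text))
    print("Llama-2-Ko:", ko.tokenize(text))
```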
| 69 |
+
|
| 70 |
+
# **Model Benchmark**
|
| 71 |
+
|
| 72 |
+
## LM Eval Harness - Korean (polyglot branch)
|
| 73 |
+
|
| 74 |
+
- Used EleutherAI's lm-evaluation-harness https://github.com/EleutherAI/lm-evaluation-harness/tree/polyglot
|
| 75 |
+
|
| 76 |
+
### NSMC (Acc) - 50000 full test
|
| 77 |
+
|
| 78 |
+
TBD
|
| 79 |
+
|
| 80 |
+
### COPA (F1)
|
| 81 |
+
|
| 82 |
+
<img src=https://user-images.githubusercontent.com/11323660/255575809-c037bc6e-0566-436a-a6c1-2329ac92187a.png style=""max-width: 700px; width: 100%"" />
|
| 83 |
+
|
| 84 |
+
| Model | 0-shot | 5-shot | 10-shot | 50-shot |
|
| 85 |
+
| --- | --- | --- | --- | --- |
|
| 86 |
+
| https://huggingface.co/skt/ko-gpt-trinity-1.2B-v0.5 | 0.6696 | 0.6477 | 0.6419 | 0.6514 |
|
| 87 |
+
| https://huggingface.co/kakaobrain/kogpt | 0.7345 | 0.7287 | 0.7277 | 0.7479 |
|
| 88 |
+
| https://huggingface.co/facebook/xglm-7.5B | 0.6723 | 0.6731 | 0.6769 | 0.7119 |
|
| 89 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-1.3b | 0.7196 | 0.7193 | 0.7204 | 0.7206 |
|
| 90 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-3.8b | 0.7595 | 0.7608 | 0.7638 | 0.7788 |
|
| 91 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-5.8b | 0.7745 | 0.7676 | 0.7775 | 0.7887 |
|
| 92 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-12.8b | 0.7937 | 0.8108 | 0.8037 | 0.8369 |
|
| 93 |
+
| Llama-2 Original 7B* | 0.562033 | 0.575982 | 0.576216 | 0.595532 |
|
| 94 |
+
| Llama-2-Ko-7b 20B (10k) | 0.738780 | 0.762639 | 0.780761 | 0.797863 |
|
| 95 |
+
| Llama-2-Ko-7b 40B (20k) | 0.743630 | 0.792716 | 0.803746 | 0.825944 |
|
| 96 |
+
*Llama-2 Original 7B used https://huggingface.co/meta-llama/Llama-2-7b-hf (w/o tokenizer updated)
|
| 97 |
+
|
| 98 |
+
### HellaSwag (F1)
|
| 99 |
+
|
| 100 |
+
<img src=https://user-images.githubusercontent.com/11323660/255576090-a2bfc1ae-d117-44b7-9f7b-262e41179ec1.png style=""max-width: 700px; width: 100%"" />
|
| 101 |
+
|
| 102 |
+
| Model | 0-shot | 5-shot | 10-shot | 50-shot |
|
| 103 |
+
| --- | --- | --- | --- | --- |
|
| 104 |
+
| https://huggingface.co/skt/ko-gpt-trinity-1.2B-v0.5 | 0.5243 | 0.5272 | 0.5166 | 0.5352 |
|
| 105 |
+
| https://huggingface.co/kakaobrain/kogpt | 0.5590 | 0.5833 | 0.5828 | 0.5907 |
|
| 106 |
+
| https://huggingface.co/facebook/xglm-7.5B | 0.5665 | 0.5689 | 0.5565 | 0.5622 |
|
| 107 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-1.3b | 0.5247 | 0.5260 | 0.5278 | 0.5427 |
|
| 108 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-3.8b | 0.5707 | 0.5830 | 0.5670 | 0.5787 |
|
| 109 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-5.8b | 0.5976 | 0.5998 | 0.5979 | 0.6208 |
|
| 110 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-12.8b | 0.5954 | 0.6306 | 0.6098 | 0.6118 |
|
| 111 |
+
| Llama-2 Original 7B* | 0.415390 | 0.431382 | 0.421342 | 0.442003 |
|
| 112 |
+
| Llama-2-Ko-7b 20B (10k) | 0.451757 | 0.466751 | 0.472607 | 0.482776 |
|
| 113 |
+
| Llama-2-Ko-7b 40B (20k) | 0.456246 | 0.465665 | 0.469810 | 0.477374 |
|
| 114 |
+
*Llama-2 Original 7B used https://huggingface.co/meta-llama/Llama-2-7b-hf (w/o tokenizer updated)
|
| 115 |
+
|
| 116 |
+
### BoolQ (F1)
|
| 117 |
+
|
| 118 |
+
<img src=https://user-images.githubusercontent.com/11323660/255576343-5d847a6f-3b6a-41a7-af37-0f11940a5ea4.png style=""max-width: 700px; width: 100%"" />
|
| 119 |
+
|
| 120 |
+
| Model | 0-shot | 5-shot | 10-shot | 50-shot |
|
| 121 |
+
| --- | --- | --- | --- | --- |
|
| 122 |
+
| https://huggingface.co/skt/ko-gpt-trinity-1.2B-v0.5 | 0.3356 | 0.4014 | 0.3640 | 0.3560 |
|
| 123 |
+
| https://huggingface.co/kakaobrain/kogpt | 0.4514 | 0.5981 | 0.5499 | 0.5202 |
|
| 124 |
+
| https://huggingface.co/facebook/xglm-7.5B | 0.4464 | 0.3324 | 0.3324 | 0.3324 |
|
| 125 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-1.3b | 0.3552 | 0.4751 | 0.4109 | 0.4038 |
|
| 126 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-3.8b | 0.4320 | 0.5263 | 0.4930 | 0.4038 |
|
| 127 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-5.8b | 0.4356 | 0.5698 | 0.5187 | 0.5236 |
|
| 128 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-12.8b | 0.4818 | 0.6041 | 0.6289 | 0.6448 |
|
| 129 |
+
| Llama-2 Original 7B* | 0.352050 | 0.563238 | 0.474788 | 0.419222 |
|
| 130 |
+
| Llama-2-Ko-7b 20B (10k) | 0.360656 | 0.679743 | 0.680109 | 0.662152 |
|
| 131 |
+
| Llama-2-Ko-7b 40B (20k) | 0.578640 | 0.697747 | 0.708358 | 0.714423 |
|
| 132 |
+
*Llama-2 Original 7B used https://huggingface.co/meta-llama/Llama-2-7b-hf (w/o tokenizer updated)
|
| 133 |
+
|
| 134 |
+
### SentiNeg (F1)
|
| 135 |
+
|
| 136 |
+
<img src=https://user-images.githubusercontent.com/11323660/255576572-b005a81d-fa4d-4709-b48a-f0fe4eed17a3.png style=""max-width: 700px; width: 100%"" />
|
| 137 |
+
|
| 138 |
+
| Model | 0-shot | 5-shot | 10-shot | 50-shot |
|
| 139 |
+
| --- | --- | --- | --- | --- |
|
| 140 |
+
| https://huggingface.co/skt/ko-gpt-trinity-1.2B-v0.5 | 0.6065 | 0.6878 | 0.7280 | 0.8413 |
|
| 141 |
+
| https://huggingface.co/kakaobrain/kogpt | 0.3747 | 0.8942 | 0.9294 | 0.9698 |
|
| 142 |
+
| https://huggingface.co/facebook/xglm-7.5B | 0.3578 | 0.4471 | 0.3964 | 0.5271 |
|
| 143 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-1.3b | 0.6790 | 0.6257 | 0.5514 | 0.7851 |
|
| 144 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-3.8b | 0.4858 | 0.7950 | 0.7320 | 0.7851 |
|
| 145 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-5.8b | 0.3394 | 0.8841 | 0.8808 | 0.9521 |
|
| 146 |
+
| https://huggingface.co/EleutherAI/polyglot-ko-12.8b | 0.9117 | 0.9015 | 0.9345 | 0.9723 |
|
| 147 |
+
| Llama-2 Original 7B* | 0.347502 | 0.529124 | 0.480641 | 0.788457 |
|
| 148 |
+
| Llama-2-Ko-7b 20B (10k) | 0.485546 | 0.829503 | 0.871141 | 0.851253 |
|
| 149 |
+
| Llama-2-Ko-7b 40B (20k) | 0.459447 | 0.761079 | 0.727611 | 0.936988 |
|
| 150 |
+
*Llama-2 Original 7B used https://huggingface.co/meta-llama/Llama-2-7b-hf (w/o tokenizer updated)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
## Note for oobabooga/text-generation-webui
|
| 154 |
+
|
| 155 |
+
Remove `ValueError` from the `except` clause of the `load_tokenizer` function (line 109 or nearby) in `modules/models.py`:
|
| 156 |
+
|
| 157 |
+
```diff
|
| 158 |
+
diff --git a/modules/models.py b/modules/models.py
|
| 159 |
+
index 232d5fa..de5b7a0 100644
|
| 160 |
+
--- a/modules/models.py
|
| 161 |
+
+++ b/modules/models.py
|
| 162 |
+
@@ -106,7 +106,7 @@ def load_tokenizer(model_name, model):
|
| 163 |
+
trust_remote_code=shared.args.trust_remote_code,
|
| 164 |
+
use_fast=False
|
| 165 |
+
)
|
| 166 |
+
- except ValueError:
|
| 167 |
+
+ except:
|
| 168 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
| 169 |
+
path_to_model,
|
| 170 |
+
trust_remote_code=shared.args.trust_remote_code,
|
| 171 |
+
```
|
| 172 |
+
|
| 173 |
+
Since Llama-2-Ko uses the fast tokenizer provided by the HF `tokenizers` library, not the `sentencepiece` package,
|
| 174 |
+
the `use_fast=True` option is required when initializing the tokenizer.
|
| 175 |
+
|
| 176 |
+
Apple Silicon does not support BF16 computation; use the CPU instead. (BF16 is supported when using an NVIDIA GPU.)
|
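A minimal loading sketch that follows the two notes above (fast tokenizer required, BF16 only on NVIDIA GPUs); the prompt and generation settings are placeholders rather than values from the card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "beomi/llama-2-ko-7b"

# The repo ships an HF `tokenizers` tokenizer, not a sentencepiece model, so use_fast=True.
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)

# Use BF16 only where it is supported (NVIDIA GPUs); fall back to FP32 on CPU / Apple Silicon.
dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype)

inputs = tokenizer("대한민국의 수도는", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```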
| 177 |
+
|
| 178 |
+
## Citation
|
| 179 |
+
|
| 180 |
+
```
|
| 181 |
+
@misc {l._junbum_2023,
|
| 182 |
+
author = { {L. Junbum} },
|
| 183 |
+
title = { llama-2-ko-7b (Revision 4a9993e) },
|
| 184 |
+
year = 2023,
|
| 185 |
+
url = { https://huggingface.co/beomi/llama-2-ko-7b },
|
| 186 |
+
doi = { 10.57967/hf/1098 },
|
| 187 |
+
publisher = { Hugging Face }
|
| 188 |
+
}
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
## Acknowledgement
|
| 192 |
+
|
| 193 |
+
The training was supported by the [TPU Research Cloud](https://sites.research.google/trc/) program.
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
| 197 |
+
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_beomi__llama-2-ko-7b)
|
| 198 |
+
|
| 199 |
+
| Metric | Value |
|
| 200 |
+
|-----------------------|---------------------------|
|
| 201 |
+
| Avg. | 39.43 |
|
| 202 |
+
| ARC (25-shot) | 48.46 |
|
| 203 |
+
| HellaSwag (10-shot) | 75.28 |
|
| 204 |
+
| MMLU (5-shot) | 39.56 |
|
| 205 |
+
| TruthfulQA (0-shot) | 34.49 |
|
| 206 |
+
| Winogrande (5-shot) | 72.14 |
|
| 207 |
+
| GSM8K (5-shot) | 1.97 |
|
| 208 |
+
| DROP (3-shot) | 4.1 |
|
| 209 |
+
","{""id"": ""beomi/llama-2-ko-7b"", ""author"": ""beomi"", ""sha"": ""4709f8cd6074590ed591e2fdf75499ae76ac4064"", ""last_modified"": ""2023-12-27 02:04:01+00:00"", ""created_at"": ""2023-07-20 03:25:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5089, ""downloads_all_time"": null, ""likes"": 175, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""llama"", ""text-generation"", ""facebook"", ""meta"", ""llama-2"", ""kollama"", ""llama-2-ko"", ""en"", ""ko"", ""doi:10.57967/hf/1098"", ""autotrain_compatible"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- ko\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\n- kollama\n- llama-2-ko\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00012-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00014-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00015-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""JUNGU/OpenELM-270M_chat"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard_two"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""tamang0000/assamese-tokenizer-comparison"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR""], ""safetensors"": {""parameters"": {""BF16"": 6855856128, ""F32"": 2048}, ""total"": 6855858176}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-27 02:04:01+00:00"", ""cardData"": ""language:\n- en\n- 
ko\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\n- kollama\n- llama-2-ko\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64b8a9256b5ee8c3885bb2c4"", ""modelId"": ""beomi/llama-2-ko-7b"", ""usedStorage"": 137118440803}",0,"https://huggingface.co/illuni/illuni-llama-2-ko-7b, https://huggingface.co/54data/llama_2_ko_7b_wiki_QA, https://huggingface.co/54data/Llama2-ko-7b-finetune-v2, https://huggingface.co/nayohan/llama-2-ko-7b-Inst, https://huggingface.co/kim1/test_llama_2_ko_2, https://huggingface.co/kim1/test_llama_2_ko_3, https://huggingface.co/kim1/test_llama_2_ko_class_kosha_1, https://huggingface.co/kim1/test_llama_2_ko_class_kosha_2, https://huggingface.co/kody0525/KOpen-platypus-llama-2-ko-7b, https://huggingface.co/ccw7463/llama-ko-7b_translator_ver_0.1",10,"https://huggingface.co/yejeekang/legal-llama-ko-7b-50step, https://huggingface.co/gangkongkong/llama-2-ko-7b-gangkk-alpaca-all-epoch1-merge, https://huggingface.co/gangkongkong/llama-2-ko-7b-gangkk-alpaca-all-epoch3-nomerge, https://huggingface.co/gangkongkong/llama-2-ko-7b-gangkk-alpaca-cosin-all-epoch1-nomerge, https://huggingface.co/sanmaro6803/llama2-ko-7b-ds-qlora-sft-qi2, https://huggingface.co/seyeon-shijuan/KoAlpaca-llama-2-7b-adapter-cosmetic, https://huggingface.co/oosij/llama-2-ko-7b-ft-emo-single_json, https://huggingface.co/oosij/llama-2-ko-7b-ft-emo-multi, https://huggingface.co/Ash-Hun/WelSSiSKo_v3_llama-2-ko-base_text-generation, https://huggingface.co/oosij/llama2-ko-7b-3task, https://huggingface.co/howdi2000/may, https://huggingface.co/bambaram/llama-ko-4-journal-finetue, https://huggingface.co/howdi2000/may_v3, https://huggingface.co/Nada81/FineTunedllema_nada, https://huggingface.co/choiss7/msg-platfm, https://huggingface.co/Toastmachine/Orsay_ko_7b, https://huggingface.co/olzlt/results, https://huggingface.co/arcos02/llama2ko, https://huggingface.co/arcos02/llama2ko_def",19,"https://huggingface.co/geonheechoi22/llama-2-ko-7b-Q4_K_M-GGUF, https://huggingface.co/thos0412/llama-2-ko-7b-Q4_K_M-GGUF, https://huggingface.co/tensorblock/llama-2-ko-7b-GGUF",3,https://huggingface.co/LoudAI/kubwa-7b-josh,1,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Vikhrmodels/small-shlepa-lb, b1sheng/kg_llm_leaderboard_test, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=beomi/llama-2-ko-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbeomi%2Fllama-2-ko-7b%5D(%2Fbeomi%2Fllama-2-ko-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, neubla/neubla-llm-evaluation-board, tamang0000/assamese-tokenizer-comparison",13
|
| 210 |
+
illuni/illuni-llama-2-ko-7b,"---
|
| 211 |
+
language:
|
| 212 |
+
- ko
|
| 213 |
+
base_model: beomi/llama-2-ko-7b
|
| 214 |
+
license: mit
|
| 215 |
+
pipeline_tag: question-answering
|
| 216 |
+
tags:
|
| 217 |
+
- instruct
|
| 218 |
+
- instruction
|
| 219 |
+
- llama-2
|
| 220 |
+
---
|
| 221 |
+
|
| 222 |
+
# llama2-7b
|
| 223 |
+
|
| 224 |
+
### Model Details
|
| 225 |
+
- Developed by: Julleong
|
| 226 |
+
- Backbone Model: beomi/llama-2-ko-7b
|
| 227 |
+
- Library: [transformers](https://github.com/huggingface/transformers)
|
| 228 |
+
|
| 229 |
+
### Used Datasets
|
| 230 |
+
- Named Entity Dictionary 2022 (개체명 사전 2022, from the 모두의 말뭉치 / Modu Corpus)
|
| 231 |
+
|
| 232 |
+
### Prompt Template
|
| 233 |
+
```
|
| 234 |
+
<usr>
|
| 235 |
+
{Instruction}
|
| 236 |
+
|
| 237 |
+
<bot>
|
| 238 |
+
{Answer}
|
| 239 |
+
```
|
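A hypothetical helper showing how the template above can be filled in; `build_prompt` is not part of the repository, and the exact whitespace is an assumption based on the card.

```python
def build_prompt(instruction: str) -> str:
    # Instruction goes under <usr>; the model's answer is generated after <bot>.
    return f"<usr>\n{instruction}\n\n<bot>\n"

prompt = build_prompt("서울을 한 문장으로 소개해 줘.")  # "Introduce Seoul in one sentence."
print(prompt)
```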
| 240 |
+
|
| 241 |
+
### License
|
| 242 |
+
- MIT","{""id"": ""illuni/illuni-llama-2-ko-7b"", ""author"": ""illuni"", ""sha"": ""104fac91a859164fd379c96814788090bbe22e76"", ""last_modified"": ""2024-03-07 08:19:25+00:00"", ""created_at"": ""2024-02-29 03:22:31+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 65, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""instruct"", ""instruction"", ""llama-2"", ""question-answering"", ""ko"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\nlanguage:\n- ko\nlicense: mit\npipeline_tag: question-answering\ntags:\n- instruct\n- instruction\n- llama-2"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""</s>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 6855856128}, ""total"": 6855856128}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-07 08:19:25+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\nlanguage:\n- ko\nlicense: mit\npipeline_tag: question-answering\ntags:\n- instruct\n- instruction\n- llama-2"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65dff87760aae5fc119f9381"", ""modelId"": ""illuni/illuni-llama-2-ko-7b"", ""usedStorage"": 13711745792}",1,,0,,0,"https://huggingface.co/tensorblock/illuni-llama-2-ko-7b-GGUF, 
https://huggingface.co/mradermacher/illuni-llama-2-ko-7b-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=illuni/illuni-llama-2-ko-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Billuni%2Filluni-llama-2-ko-7b%5D(%2Filluni%2Filluni-llama-2-ko-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 243 |
+
54data/llama_2_ko_7b_wiki_QA,"---
|
| 244 |
+
base_model: beomi/llama-2-ko-7b
|
| 245 |
+
tags:
|
| 246 |
+
- generated_from_trainer
|
| 247 |
+
model-index:
|
| 248 |
+
- name: llama_2_ko_7b_wiki_QA
|
| 249 |
+
results: []
|
| 250 |
+
---
|
| 251 |
+
|
| 252 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 253 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 254 |
+
|
| 255 |
+
# llama_2_ko_7b_wiki_QA
|
| 256 |
+
|
| 257 |
+
This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.
|
| 258 |
+
It achieves the following results on the evaluation set:
|
| 259 |
+
- Loss: 1.1703
|
| 260 |
+
|
| 261 |
+
## Model description
|
| 262 |
+
|
| 263 |
+
More information needed
|
| 264 |
+
|
| 265 |
+
## Intended uses & limitations
|
| 266 |
+
|
| 267 |
+
More information needed
|
| 268 |
+
|
| 269 |
+
## Training and evaluation data
|
| 270 |
+
|
| 271 |
+
More information needed
|
| 272 |
+
|
| 273 |
+
## Training procedure
|
| 274 |
+
|
| 275 |
+
### Training hyperparameters
|
| 276 |
+
|
| 277 |
+
The following hyperparameters were used during training:
|
| 278 |
+
- learning_rate: 0.0002
|
| 279 |
+
- train_batch_size: 4
|
| 280 |
+
- eval_batch_size: 8
|
| 281 |
+
- seed: 42
|
| 282 |
+
- gradient_accumulation_steps: 2
|
| 283 |
+
- total_train_batch_size: 8
|
| 284 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 285 |
+
- lr_scheduler_type: linear
|
| 286 |
+
- lr_scheduler_warmup_steps: 100
|
| 287 |
+
- training_steps: 300
|
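The hyperparameters listed above map roughly onto `transformers.TrainingArguments`; the card does not include the actual training script, so the following is only a sketch of that mapping, with a placeholder output directory.

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="llama_2_ko_7b_wiki_QA",  # placeholder
    learning_rate=2e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=2,   # 4 x 2 = total train batch size of 8
    lr_scheduler_type="linear",
    warmup_steps=100,
    max_steps=300,
)
```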
| 288 |
+
|
| 289 |
+
### Training results
|
| 290 |
+
|
| 291 |
+
| Training Loss | Epoch | Step | Validation Loss |
|
| 292 |
+
|:-------------:|:-----:|:----:|:---------------:|
|
| 293 |
+
| 1.8568 | 0.33 | 50 | 1.4294 |
|
| 294 |
+
| 1.2307 | 0.67 | 100 | 1.2169 |
|
| 295 |
+
| 1.1788 | 1.0 | 150 | 1.1865 |
|
| 296 |
+
| 1.0837 | 1.33 | 200 | 1.1810 |
|
| 297 |
+
| 1.1905 | 1.67 | 250 | 1.1740 |
|
| 298 |
+
| 1.161 | 2.0 | 300 | 1.1703 |
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
### Framework versions
|
| 302 |
+
|
| 303 |
+
- Transformers 4.33.0.dev0
|
| 304 |
+
- Pytorch 2.0.0+cu117
|
| 305 |
+
- Datasets 2.10.1
|
| 306 |
+
- Tokenizers 0.13.3
|
| 307 |
+
","{""id"": ""54data/llama_2_ko_7b_wiki_QA"", ""author"": ""54data"", ""sha"": ""84906d1ac920a1bf448f827d037a0fb6687d5581"", ""last_modified"": ""2023-08-31 05:00:25+00:00"", ""created_at"": ""2023-08-23 15:46:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama_2_ko_7b_wiki_QA\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""llama_2_ko_7b_wiki_QA"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-31 05:00:25+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama_2_ko_7b_wiki_QA\n results: []"", ""transformersInfo"": null, ""_id"": ""64e629e67372359023fcc52a"", ""modelId"": ""54data/llama_2_ko_7b_wiki_QA"", ""usedStorage"": 134591966}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=54data/llama_2_ko_7b_wiki_QA&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5B54data%2Fllama_2_ko_7b_wiki_QA%5D(%2F54data%2Fllama_2_ko_7b_wiki_QA)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 308 |
+
54data/Llama2-ko-7b-finetune-v2,"---
|
| 309 |
+
base_model: beomi/llama-2-ko-7b
|
| 310 |
+
tags:
|
| 311 |
+
- generated_from_trainer
|
| 312 |
+
model-index:
|
| 313 |
+
- name: Llama2-ko-7b-finetune-v2
|
| 314 |
+
results: []
|
| 315 |
+
---
|
| 316 |
+
|
| 317 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 318 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 319 |
+
|
| 320 |
+
# Llama2-ko-7b-finetune-v2
|
| 321 |
+
|
| 322 |
+
This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.
|
| 323 |
+
It achieves the following results on the evaluation set:
|
| 324 |
+
- Loss: 1.5327
|
| 325 |
+
|
| 326 |
+
## Model description
|
| 327 |
+
|
| 328 |
+
More information needed
|
| 329 |
+
|
| 330 |
+
## Intended uses & limitations
|
| 331 |
+
|
| 332 |
+
More information needed
|
| 333 |
+
|
| 334 |
+
## Training and evaluation data
|
| 335 |
+
|
| 336 |
+
More information needed
|
| 337 |
+
|
| 338 |
+
## Training procedure
|
| 339 |
+
|
| 340 |
+
### Training hyperparameters
|
| 341 |
+
|
| 342 |
+
The following hyperparameters were used during training:
|
| 343 |
+
- learning_rate: 0.0003
|
| 344 |
+
- train_batch_size: 4
|
| 345 |
+
- eval_batch_size: 8
|
| 346 |
+
- seed: 42
|
| 347 |
+
- gradient_accumulation_steps: 4
|
| 348 |
+
- total_train_batch_size: 16
|
| 349 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 350 |
+
- lr_scheduler_type: linear
|
| 351 |
+
- lr_scheduler_warmup_steps: 100
|
| 352 |
+
- training_steps: 300
|
| 353 |
+
|
| 354 |
+
### Training results
|
| 355 |
+
|
| 356 |
+
| Training Loss | Epoch | Step | Validation Loss |
|
| 357 |
+
|:-------------:|:-----:|:----:|:---------------:|
|
| 358 |
+
| 1.4901 | 0.22 | 100 | 1.5910 |
|
| 359 |
+
| 1.5736 | 0.44 | 200 | 1.5476 |
|
| 360 |
+
| 1.4611 | 0.66 | 300 | 1.5327 |
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
### Framework versions
|
| 364 |
+
|
| 365 |
+
- Transformers 4.34.0.dev0
|
| 366 |
+
- Pytorch 2.0.0+cu117
|
| 367 |
+
- Datasets 2.10.1
|
| 368 |
+
- Tokenizers 0.13.3
|
| 369 |
+
","{""id"": ""54data/Llama2-ko-7b-finetune-v2"", ""author"": ""54data"", ""sha"": ""45897bb0c854c15a6c6c66f9e6aac6193d0e9831"", ""last_modified"": ""2023-09-05 08:31:15+00:00"", ""created_at"": ""2023-09-05 04:44:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Llama2-ko-7b-finetune-v2\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""Llama2-ko-7b-finetune-v2"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-05 08:31:15+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Llama2-ko-7b-finetune-v2\n results: []"", ""transformersInfo"": null, ""_id"": ""64f6b22ec3bdaab6f590a6c1"", ""modelId"": ""54data/Llama2-ko-7b-finetune-v2"", ""usedStorage"": 50472994}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=54data/Llama2-ko-7b-finetune-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5B54data%2FLlama2-ko-7b-finetune-v2%5D(%2F54data%2FLlama2-ko-7b-finetune-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 370 |
+
nayohan/llama-2-ko-7b-Inst,"---
|
| 371 |
+
license: apache-2.0
|
| 372 |
+
datasets:
|
| 373 |
+
- DILAB-HYU/KoQuality
|
| 374 |
+
language:
|
| 375 |
+
- ko
|
| 376 |
+
pipeline_tag: text-generation
|
| 377 |
+
tags:
|
| 378 |
+
- llama-2-ko
|
| 379 |
+
- KoQuality
|
| 380 |
+
base_model: beomi/llama-2-ko-7b
|
| 381 |
+
---
|
| 382 |
+
|
| 383 |
+
This model is an instruct-tuned llama-2-ko-7b model, trained using only 10% of the [Kullm, OIG, KoAlpaca] instruction datasets.
|
| 384 |
+
len10_k100_mppl_n0.1.json -> 121step
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
## Training hyperparameters
|
| 388 |
+
- learning_rate: 5e-5
|
| 389 |
+
- train_batch_size: 1
|
| 390 |
+
- seed: 42
|
| 391 |
+
- distributed_type: multi-GPU (A30 24G) + CPU Offloading
|
| 392 |
+
- num_devices: 2
|
| 393 |
+
- gradient_accumulation_steps: 32
|
| 394 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 395 |
+
- lr_scheduler_type: linear
|
| 396 |
+
- num_epochs: 2.0
|
| 397 |
+
|
| 398 |
+
## Framework versions
|
| 399 |
+
- Transformers 4.30.2
|
| 400 |
+
- Pytorch 2.0.1+cu117
|
| 401 |
+
- Datasets 2.11.0
|
| 402 |
+
- deepspeed 0.9.5","{""id"": ""nayohan/llama-2-ko-7b-Inst"", ""author"": ""nayohan"", ""sha"": ""6d4b2a4bc363d79aa03edc287f8921dc1056262f"", ""last_modified"": ""2023-10-26 10:44:28+00:00"", ""created_at"": ""2023-10-25 04:31:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""llama-2-ko"", ""KoQuality"", ""ko"", ""dataset:DILAB-HYU/KoQuality"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ndatasets:\n- DILAB-HYU/KoQuality\nlanguage:\n- ko\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- llama-2-ko\n- KoQuality"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-26 10:44:28+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ndatasets:\n- DILAB-HYU/KoQuality\nlanguage:\n- ko\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- llama-2-ko\n- KoQuality"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65389a0fad8f7c2cbe828f54"", ""modelId"": 
""nayohan/llama-2-ko-7b-Inst"", ""usedStorage"": 27423544865}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=nayohan/llama-2-ko-7b-Inst&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnayohan%2Fllama-2-ko-7b-Inst%5D(%2Fnayohan%2Fllama-2-ko-7b-Inst)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 403 |
+
kim1/test_llama_2_ko_2,"---
|
| 404 |
+
base_model: beomi/llama-2-ko-7b
|
| 405 |
+
tags:
|
| 406 |
+
- generated_from_trainer
|
| 407 |
+
model-index:
|
| 408 |
+
- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_Feb_7th
|
| 409 |
+
results: []
|
| 410 |
+
---
|
| 411 |
+
|
| 412 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 413 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 414 |
+
|
| 415 |
+
# llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_Feb_7th
|
| 416 |
+
|
| 417 |
+
This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.
|
| 418 |
+
|
| 419 |
+
## Model description
|
| 420 |
+
|
| 421 |
+
More information needed
|
| 422 |
+
|
| 423 |
+
## Intended uses & limitations
|
| 424 |
+
|
| 425 |
+
More information needed
|
| 426 |
+
|
| 427 |
+
## Training and evaluation data
|
| 428 |
+
|
| 429 |
+
More information needed
|
| 430 |
+
|
| 431 |
+
## Training procedure
|
| 432 |
+
|
| 433 |
+
### Training hyperparameters
|
| 434 |
+
|
| 435 |
+
The following hyperparameters were used during training:
|
| 436 |
+
- learning_rate: 2e-05
|
| 437 |
+
- train_batch_size: 1
|
| 438 |
+
- eval_batch_size: 8
|
| 439 |
+
- seed: 42
|
| 440 |
+
- gradient_accumulation_steps: 32
|
| 441 |
+
- total_train_batch_size: 32
|
| 442 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 443 |
+
- lr_scheduler_type: linear
|
| 444 |
+
- num_epochs: 30.0
|
| 445 |
+
|
| 446 |
+
### Training results
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
### Framework versions
|
| 451 |
+
|
| 452 |
+
- Transformers 4.33.3
|
| 453 |
+
- Pytorch 2.2.0+cu121
|
| 454 |
+
- Datasets 2.16.0
|
| 455 |
+
- Tokenizers 0.13.3
|
| 456 |
+
","{""id"": ""kim1/test_llama_2_ko_2"", ""author"": ""kim1"", ""sha"": ""a8f33ec35fd7ce60eefbf055e6d233ef71d2b920"", ""last_modified"": ""2024-02-08 01:45:26+00:00"", ""created_at"": ""2024-02-08 01:21:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_Feb_7th\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_Feb_7th"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], 
""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-08 01:45:26+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_Feb_7th\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65c42ca07b72ab4d7b88bbbe"", ""modelId"": ""kim1/test_llama_2_ko_2"", ""usedStorage"": 13711821615}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kim1/test_llama_2_ko_2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkim1%2Ftest_llama_2_ko_2%5D(%2Fkim1%2Ftest_llama_2_ko_2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
|
| 457 |
+
kim1/test_llama_2_ko_3,"---
|
| 458 |
+
base_model: beomi/llama-2-ko-7b
|
| 459 |
+
tags:
|
| 460 |
+
- generated_from_trainer
|
| 461 |
+
model-index:
|
| 462 |
+
- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_plus_Feb_14th
|
| 463 |
+
results: []
|
| 464 |
+
---
|
| 465 |
+
|
| 466 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
| 467 |
+
should probably proofread and complete it, then remove this comment. -->
|
| 468 |
+
|
| 469 |
+
# llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_plus_Feb_14th
|
| 470 |
+
|
| 471 |
+
This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.
|
| 472 |
+
|
| 473 |
+
## Model description
|
| 474 |
+
|
| 475 |
+
More information needed
|
| 476 |
+
|
| 477 |
+
## Intended uses & limitations
|
| 478 |
+
|
| 479 |
+
More information needed
|
| 480 |
+
|
| 481 |
+
## Training and evaluation data
|
| 482 |
+
|
| 483 |
+
More information needed
|
| 484 |
+
|
| 485 |
+
## Training procedure
|
| 486 |
+
|
| 487 |
+
### Training hyperparameters
|
| 488 |
+
|
| 489 |
+
The following hyperparameters were used during training:
|
| 490 |
+
- learning_rate: 2e-05
|
| 491 |
+
- train_batch_size: 1
|
| 492 |
+
- eval_batch_size: 8
|
| 493 |
+
- seed: 42
|
| 494 |
+
- gradient_accumulation_steps: 16
|
| 495 |
+
- total_train_batch_size: 16
|
| 496 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
| 497 |
+
- lr_scheduler_type: linear
|
| 498 |
+
- num_epochs: 30.0
|
| 499 |
+
|
| 500 |
+
### Training results
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
### Framework versions
|
| 505 |
+
|
| 506 |
+
- Transformers 4.33.3
|
| 507 |
+
- Pytorch 2.2.0+cu121
|
| 508 |
+
- Datasets 2.16.0
|
| 509 |
+
- Tokenizers 0.13.3
|
| 510 |
+
","{""id"": ""kim1/test_llama_2_ko_3"", ""author"": ""kim1"", ""sha"": ""827a50c09a37fcca9f7fae11df6de0de0c13bf6f"", ""last_modified"": ""2024-02-15 00:57:29+00:00"", ""created_at"": ""2024-02-15 00:34:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_plus_Feb_14th\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_plus_Feb_14th"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], 
""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-15 00:57:29+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_all_data_test_1_1_plus_Feb_14th\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65cd5c0c0cdcf1ddd5128375"", ""modelId"": ""kim1/test_llama_2_ko_3"", ""usedStorage"": 13711821615}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kim1/test_llama_2_ko_3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkim1%2Ftest_llama_2_ko_3%5D(%2Fkim1%2Ftest_llama_2_ko_3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
kim1/test_llama_2_ko_class_kosha_1,"---
base_model: beomi/llama-2-ko-7b
tags:
- generated_from_trainer
model-index:
- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_19th
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_19th

This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30.0

### Training results



### Framework versions

- Transformers 4.33.3
- Pytorch 2.2.0+cu121
- Datasets 2.16.0
- Tokenizers 0.13.3
","{""id"": ""kim1/test_llama_2_ko_class_kosha_1"", ""author"": ""kim1"", ""sha"": ""c84e481f6a81c6cdc6beb60ad227789bfd2f614c"", ""last_modified"": ""2024-02-20 00:46:39+00:00"", ""created_at"": ""2024-02-20 00:23:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_19th\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_19th"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, 
blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-20 00:46:39+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_32_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_19th\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65d3f11bede228eb4149138f"", ""modelId"": ""kim1/test_llama_2_ko_class_kosha_1"", ""usedStorage"": 13711821679}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kim1/test_llama_2_ko_class_kosha_1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkim1%2Ftest_llama_2_ko_class_kosha_1%5D(%2Fkim1%2Ftest_llama_2_ko_class_kosha_1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
kim1/test_llama_2_ko_class_kosha_2,"---
base_model: beomi/llama-2-ko-7b
tags:
- generated_from_trainer
model-index:
- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_16_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_20th_retry
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# llama-2-ko-7b-v1.1b-singlegpu_gradient_16_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_20th_retry

This model is a fine-tuned version of [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30.0

### Training results



### Framework versions

- Transformers 4.33.3
- Pytorch 2.2.0+cu121
- Datasets 2.16.0
- Tokenizers 0.13.3
","{""id"": ""kim1/test_llama_2_ko_class_kosha_2"", ""author"": ""kim1"", ""sha"": ""d7a5aa8d3be133da449b60f69cc05c31eb6a7f4d"", ""last_modified"": ""2024-02-21 00:27:37+00:00"", ""created_at"": ""2024-02-21 00:04:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_16_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_20th_retry\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""llama-2-ko-7b-v1.1b-singlegpu_gradient_16_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_20th_retry"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='all_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', 
size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-21 00:27:37+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ntags:\n- generated_from_trainer\nmodel-index:\n- name: llama-2-ko-7b-v1.1b-singlegpu_gradient_16_epoch_30_train_batch_size_1_df_mongodb_terminology_terms_kosha_1_1_Feb_20th_retry\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65d53e00be26e8e084efbcf0"", ""modelId"": ""kim1/test_llama_2_ko_class_kosha_2"", ""usedStorage"": 13711821679}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kim1/test_llama_2_ko_class_kosha_2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkim1%2Ftest_llama_2_ko_class_kosha_2%5D(%2Fkim1%2Ftest_llama_2_ko_class_kosha_2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
kody0525/KOpen-platypus-llama-2-ko-7b,"---
license: apache-2.0
language:
- en
tags:
- llama-2-ko-7b
- KOpen-platypus
pipeline_tag: text-generation
datasets:
- kyujinpy/KOpen-platypus
base_model: beomi/llama-2-ko-7b
model-index:
- name: KOpen-platypus-llama-2-ko-7b
  results: []
---

Update @ 2024.03.20

# KOpen-platypus-llama-2-ko-7b

This model is a fine-tuned version of beomi/llama-2-ko-7b","{""id"": ""kody0525/KOpen-platypus-llama-2-ko-7b"", ""author"": ""kody0525"", ""sha"": ""d782a6ba87761ae87c3e93d3f56dc734ca314f27"", ""last_modified"": ""2024-03-20 03:48:15+00:00"", ""created_at"": ""2024-03-14 01:24:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""llama-2-ko-7b"", ""KOpen-platypus"", ""en"", ""dataset:kyujinpy/KOpen-platypus"", ""base_model:beomi/llama-2-ko-7b"", ""base_model:finetune:beomi/llama-2-ko-7b"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: beomi/llama-2-ko-7b\ndatasets:\n- kyujinpy/KOpen-platypus\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- llama-2-ko-7b\n- KOpen-platypus\nmodel-index:\n- name: KOpen-platypus-llama-2-ko-7b\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""KOpen-platypus-llama-2-ko-7b"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""kody0525/KOpen-platypus-llama-2-ko-7b""], ""safetensors"": {""parameters"": {""F32"": 6855856128}, ""total"": 6855856128}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-20 03:48:15+00:00"", ""cardData"": ""base_model: beomi/llama-2-ko-7b\ndatasets:\n- 
kyujinpy/KOpen-platypus\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- llama-2-ko-7b\n- KOpen-platypus\nmodel-index:\n- name: KOpen-platypus-llama-2-ko-7b\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65f251e0e0724601637d175f"", ""modelId"": ""kody0525/KOpen-platypus-llama-2-ko-7b"", ""usedStorage"": 27423458152}",1,,0,,0,https://huggingface.co/mradermacher/KOpen-platypus-llama-2-ko-7b-GGUF,1,,0,"huggingface/InferenceSupport/discussions/new?title=kody0525/KOpen-platypus-llama-2-ko-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkody0525%2FKOpen-platypus-llama-2-ko-7b%5D(%2Fkody0525%2FKOpen-platypus-llama-2-ko-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kody0525/KOpen-platypus-llama-2-ko-7b",2
https://huggingface.co/ccw7463/llama-ko-7b_translator_ver_0.1,N/A,N/A,1,,0,,0,,0,,0,,0
m3e-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv
ADDED
@@ -0,0 +1,236 @@
| 1 |
+
model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
|
| 2 |
+
moka-ai/m3e-base,"---
|
| 3 |
+
language:
|
| 4 |
+
- zh
|
| 5 |
+
- en
|
| 6 |
+
tags:
|
| 7 |
+
- embedding
|
| 8 |
+
- text-embedding
|
| 9 |
+
library_name: sentence-transformers
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# 🅜 M3E Models
|
| 13 |
+
|
| 14 |
+
[m3e-small](https://huggingface.co/moka-ai/m3e-small) | [m3e-base](https://huggingface.co/moka-ai/m3e-base)
|
| 15 |
+
|
| 16 |
+
M3E 是 Moka Massive Mixed Embedding 的缩写
|
| 17 |
+
|
| 18 |
+
- Moka,此模型由 MokaAI 训练,开源和评测,训练脚本使用 [uniem](https://github.com/wangyuxinwhy/uniem/blob/main/scripts/train_m3e.py) ,评测 BenchMark 使用 [MTEB-zh](https://github.com/wangyuxinwhy/uniem/tree/main/mteb-zh)
|
| 19 |
+
- Massive,此模型通过**千万级** (2200w+) 的中文句对数据集进行训练
|
| 20 |
+
- Mixed,此模型支持中英双语的同质文本相似度计算,异质文本检索等功能,未来还会支持代码检索
|
| 21 |
+
- Embedding,此模型是文本嵌入模型,可以将自然语言转换成稠密的向量
|
| 22 |
+
|
| 23 |
+
## 🆕 更新说明
|
| 24 |
+
|
| 25 |
+
- 2023.06.24,添加微调 M3E 的教程 [notebook](https://github.com/wangyuxinwhy/uniem/blob/main/examples/finetune.ipynb),几行代码,更佳适配!<a target=""_blank"" href=""https://colab.research.google.com/github/wangyuxinwhy/uniem/blob/main/examples/finetune.ipynb"">
|
| 26 |
+
<img src=""https://colab.research.google.com/assets/colab-badge.svg"" alt=""Open In Colab""/>
|
| 27 |
+
</a>
|
| 28 |
+
- 2023.06.14,添加了三个中文开源文本嵌入模型到评测中,包括 UER, ErLangShen, DMetaSoul
|
| 29 |
+
- 2023.06.08,添加检索任务的评测结果,在 T2Ranking 1W 中文数据集上,m3e-base 在 ndcg@10 上达到了 0.8004,超过了 openai-ada-002 的 0.7786
|
| 30 |
+
- 2023.06.07,添加文本分类任务的评测结果,在 6 种文本分类数据集上,m3e-base 在 accuracy 上达到了 0.6157,超过了 openai-ada-002 的 0.5956
|
| 31 |
+
|
| 32 |
+
## ⚖️ 模型对比
|
| 33 |
+
|
| 34 |
+
| | 参数数量 | 维度 | 中文 | 英文 | s2s | s2p | s2c | 开源 | 兼容性 | s2s Acc | s2p ndcg@10 |
|
| 35 |
+
| --------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | ---- | ---------- | ------------ | -------- |
|
| 36 |
+
| m3e-small | 24M | 512 | 是 | 否 | 是 | 否 | 否 | 是 | 优 | 0.5834 | 0.7262 |
|
| 37 |
+
| m3e-base | 110M | 768 | 是 | 是 | 是 | 是 | 否 | 是 | 优 | **0.6157** | **0.8004** |
|
| 38 |
+
| text2vec | 110M | 768 | 是 | 否 | 是 | 否 | 否 | 是 | 优 | 0.5755 | 0.6346 |
|
| 39 |
+
| openai-ada-002 | 未知 | 1536 | 是 | 是 | 是 | 是 | 是 | 否 | 优 | 0.5956 | 0.7786 |
|
| 40 |
+
|
| 41 |
+
说明:
|
| 42 |
+
- s2s, 即 sentence to sentence ,代表了同质文本之间的嵌入能力,适用任务:文本相似度,重复问题检测,文本分类等
|
| 43 |
+
- s2p, 即 sentence to passage ,代表了异质文本之间的嵌入能力,适用任务:文本检索,GPT 记忆模块等
|
| 44 |
+
- s2c, 即 sentence to code ,代表了自然语言和程序语言之间的嵌入能力,适用任务:代码检索
|
| 45 |
+
- 兼容性,代表了模型在开源社区中各种项目被支持的程度,由于 m3e 和 text2vec 都可以直接通过 sentence-transformers 直接使用,所以和 openai 在社区的支持度上相当
|
| 46 |
+
- ACC & ndcg@10,详情见下方的评测
|
| 47 |
+
|
| 48 |
+
Tips:
|
| 49 |
+
- 使用场景主要是中文,少量英文的情况,建议使用 m3e 系列的模型
|
| 50 |
+
- 多语言使用场景,并且不介意数据隐私的话,我建议使用 openai text-embedding-ada-002
|
| 51 |
+
- 代码检索场景,推荐使用 openai text-embedding-ada-002
|
| 52 |
+
- 文本检索场景,请使用具备文本检索能力的模型,只在 S2S 上训练的文本嵌入模型,没有办法完成文本检索任务
|
| 53 |
+
|
| 54 |
+
## 🔧 使用 M3E
|
| 55 |
+
|
| 56 |
+
您需要先安装 sentence-transformers
|
| 57 |
+
|
| 58 |
+
```bash
|
| 59 |
+
pip install -U sentence-transformers
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
安装完成后,您可以使用以下代码来使用 M3E Models
|
| 63 |
+
|
| 64 |
+
```python
|
| 65 |
+
from sentence_transformers import SentenceTransformer
|
| 66 |
+
|
| 67 |
+
model = SentenceTransformer('moka-ai/m3e-base')
|
| 68 |
+
|
| 69 |
+
#Our sentences we like to encode
|
| 70 |
+
sentences = [
|
| 71 |
+
'* Moka 此文本嵌入模型由 MokaAI 训练并开源,训练脚本使用 uniem',
|
| 72 |
+
'* Massive 此文本嵌入模型通过**千万级**的中文句对数据集进行训练',
|
| 73 |
+
'* Mixed 此文本嵌入模型支持中英双语的同质文本相似度计算,异质文本检索等功能,未来还会支持代码检索,ALL in one'
|
| 74 |
+
]
|
| 75 |
+
|
| 76 |
+
#Sentences are encoded by calling model.encode()
|
| 77 |
+
embeddings = model.encode(sentences)
|
| 78 |
+
|
| 79 |
+
#Print the embeddings
|
| 80 |
+
for sentence, embedding in zip(sentences, embeddings):
|
| 81 |
+
print(""Sentence:"", sentence)
|
| 82 |
+
print(""Embedding:"", embedding)
|
| 83 |
+
print("""")
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
M3E 系列的所有模型在设计的时候就考虑到完全兼容 [sentence-transformers](https://www.sbert.net/) ,所以你可以通过**替换名称字符串**的方式在所有支持 sentence-transformers 的项目中**无缝**使用 M3E Models,比如 [chroma](https://docs.trychroma.com/getting-started), [guidance](https://github.com/microsoft/guidance), [semantic-kernel](https://github.com/microsoft/semantic-kernel) 。
|
| 88 |
+
|
| 89 |
+
## 🎨 微调模型
|
| 90 |
+
|
| 91 |
+
`uniem` 提供了非常易用的 finetune 接口,几行代码,即刻适配!
|
| 92 |
+
|
| 93 |
+
```python
|
| 94 |
+
from datasets import load_dataset
|
| 95 |
+
|
| 96 |
+
from uniem.finetuner import FineTuner
|
| 97 |
+
|
| 98 |
+
dataset = load_dataset('shibing624/nli_zh', 'STS-B')
|
| 99 |
+
# 指定训练的模型为 m3e-small
|
| 100 |
+
finetuner = FineTuner.from_pretrained('moka-ai/m3e-small', dataset=dataset)
|
| 101 |
+
finetuner.run(epochs=1)
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
详见 [uniem 微调教程](https://github.com/wangyuxinwhy/uniem/blob/main/examples/finetune.ipynb)
|
| 105 |
+
|
| 106 |
+
<a target=""_blank"" href=""https://colab.research.google.com/github/wangyuxinwhy/uniem/blob/main/examples/finetune.ipynb"">
|
| 107 |
+
<img src=""https://colab.research.google.com/assets/colab-badge.svg"" alt=""Open In Colab""/>
|
| 108 |
+
</a>
|
| 109 |
+
|
| 110 |
+
## ➿ 训练方案
|
| 111 |
+
|
| 112 |
+
M3E 使用 in-batch 负采样的对比学习的方式在句对数据集进行训练,为了保证 in-batch 负采样的效果,我们使用 A100 80G 来最大化 batch-size,并在共计 2200W+ 的句对数据集上训练了 1 epoch。训练脚本使用 [uniem](https://github.com/wangyuxinwhy/uniem/blob/main/scripts/train_m3e.py),您可以在这里查看具体细节。
|
| 113 |
+
|
| 114 |
+
## 🌟 特性
|
| 115 |
+
|
| 116 |
+
- 中文训练集,M3E 在大规模句对数据集上的训练,包含中文百科,金融,医疗,法律,新闻,学术等多个领域共计 2200W 句对样本,数据集详见 [M3E 数据集](#M3E数据集)
|
| 117 |
+
- 英文训练集,M3E 使用 MEDI 145W 英文三元组数据集进行训练,数据集详见 [MEDI 数据集](https://drive.google.com/file/d/1vZ5c2oJNonGOvXzppNg5mHz24O6jcc52/view),此数据集由 [instructor team](https://github.com/HKUNLP/instructor-embedding) 提供
|
| 118 |
+
- 指令数据集,M3E 使用了 300W + 的指令微调数据集,这使得 M3E 对文本编码的时候可以遵从指令,这部分的工作主要被启发于 [instructor-embedding](https://github.com/HKUNLP/instructor-embedding)
|
| 119 |
+
- 基础模型,M3E 使用 hfl 实验室的 [Roberta](https://huggingface.co/hfl/chinese-roberta-wwm-ext) 系列模型进行训练,目前提供 small 和 base 两个版本,大家可按需选用
|
| 120 |
+
- ALL IN ONE,M3E 旨在提供一个 ALL IN ONE 的文本嵌入模型,不仅支持同质句子相似度判断,还支持异质文本检索,你只需要一个模型就可以覆盖全部的应用场景,未来还会支持代码检索
|
| 121 |
+
|
| 122 |
+
## 💯 MTEB-zh 评测
|
| 123 |
+
|
| 124 |
+
- 评测模型,[text2vec](https://github.com/shibing624/text2vec), m3e-base, m3e-small, openai text-embedding-ada-002, [DMetaSoul](https://huggingface.co/DMetaSoul/sbert-chinese-general-v2), [UER](https://huggingface.co/uer/sbert-base-chinese-nli), [ErLangShen](https://huggingface.co/IDEA-CCNL/Erlangshen-SimCSE-110M-Chinese)
|
| 125 |
+
- 评测脚本,具体参考 [MTEB-zh](https://github.com/wangyuxinwhy/uniem/blob/main/mteb-zh)
|
| 126 |
+
|
| 127 |
+
### 文本分类
|
| 128 |
+
|
| 129 |
+
- 数据集选择,选择开源在 HuggingFace 上的 6 种文本分类数据集,包括新闻、电商评论、股票评论、长文本等
|
| 130 |
+
- 评测方式,使用 MTEB 的方式进行评测,报告 Accuracy。
|
| 131 |
+
|
| 132 |
+
| | text2vec | m3e-small | m3e-base | openai | DMetaSoul | uer | erlangshen |
|
| 133 |
+
| ----------------- | -------- | --------- | -------- | ------ | ----------- | ------- | ----------- |
|
| 134 |
+
| TNews | 0.43 | 0.4443 | **0.4827** | 0.4594 | 0.3084 | 0.3539 | 0.4361 |
|
| 135 |
+
| JDIphone | 0.8214 | 0.8293 | **0.8533** | 0.746 | 0.7972 | 0.8283 | 0.8356 |
|
| 136 |
+
| GubaEastmony | 0.7472 | 0.712 | 0.7621 | 0.7574 | 0.735 | 0.7534 | **0.7787** |
|
| 137 |
+
| TYQSentiment | 0.6099 | 0.6596 | **0.7188** | 0.68 | 0.6437 | 0.6662 | 0.6444 |
|
| 138 |
+
| StockComSentiment | 0.4307 | 0.4291 | 0.4363 | **0.4819** | 0.4309 | 0.4555 | 0.4482 |
|
| 139 |
+
| IFlyTek | 0.414 | 0.4263 | 0.4409 | **0.4486** | 0.3969 | 0.3762 | 0.4241 |
|
| 140 |
+
| Average | 0.5755 | 0.5834 | **0.6157** | 0.5956 | 0.552016667 | 0.57225 | 0.594516667 |
|
| 141 |
+
|
| 142 |
+
### 检索排序
|
| 143 |
+
|
| 144 |
+
#### T2Ranking 1W
|
| 145 |
+
|
| 146 |
+
- 数据集选择,使用 [T2Ranking](https://github.com/THUIR/T2Ranking/tree/main) 数据集,由于 T2Ranking 的数据集太大,openai 评测起来的时间成本和 api 费用有些高,所以我们只选择了 T2Ranking 中的前 10000 篇文章
|
| 147 |
+
- 评测方式,使用 MTEB 的方式进行评测,报告 map@1, map@10, mrr@1, mrr@10, ndcg@1, ndcg@10
|
| 148 |
+
- 注意!从实验结果和训练方式来看,除了 M3E 模型和 openai 模型外,其余模型都没有做检索任务的训练,所以结果仅供参考。
|
| 149 |
+
|
| 150 |
+
| | text2vec | openai-ada-002 | m3e-small | m3e-base | DMetaSoul | uer | erlangshen |
|
| 151 |
+
| ------- | -------- | -------------- | --------- | -------- | --------- | ------- | ---------- |
|
| 152 |
+
| map@1 | 0.4684 | 0.6133 | 0.5574 | **0.626** | 0.25203 | 0.08647 | 0.25394 |
|
| 153 |
+
| map@10 | 0.5877 | 0.7423 | 0.6878 | **0.7656** | 0.33312 | 0.13008 | 0.34714 |
|
| 154 |
+
| mrr@1 | 0.5345 | 0.6931 | 0.6324 | **0.7047** | 0.29258 | 0.10067 | 0.29447 |
|
| 155 |
+
| mrr@10 | 0.6217 | 0.7668 | 0.712 | **0.7841** | 0.36287 | 0.14516 | 0.3751 |
|
| 156 |
+
| ndcg@1 | 0.5207 | 0.6764 | 0.6159 | **0.6881** | 0.28358 | 0.09748 | 0.28578 |
|
| 157 |
+
| ndcg@10 | 0.6346 | 0.7786 | 0.7262 | **0.8004** | 0.37468 | 0.15783 | 0.39329 |
|
| 158 |
+
|
| 159 |
+
#### T2Ranking
|
| 160 |
+
|
| 161 |
+
- 数据集选择,使用 T2Ranking,刨除 openai-ada-002 模型后,我们对剩余的三个模型,进行 T2Ranking 10W 和 T2Ranking 50W 的评测。(T2Ranking 评测太耗内存了... 128G 都不行)
|
| 162 |
+
- 评测方式,使用 MTEB 的方式进行评测,报告 ndcg@10
|
| 163 |
+
|
| 164 |
+
| | text2vec | m3e-small | m3e-base |
|
| 165 |
+
| ------- | -------- | --------- | -------- |
|
| 166 |
+
| t2r-1w | 0.6346 | 0.72621 | **0.8004** |
|
| 167 |
+
| t2r-10w | 0.44644 | 0.5251 | **0.6263** |
|
| 168 |
+
| t2r-50w | 0.33482 | 0.38626 | **0.47364** |
|
| 169 |
+
|
| 170 |
+
说明:
|
| 171 |
+
- 检索排序对于 text2vec 并不公平,因为 text2vec 在训练的时候没有使用过检索相关的数据集,所以没有办法很好的完成检索任务也是正常的。
|
| 172 |
+
|
| 173 |
+
## 📂 M3E数据集
|
| 174 |
+
|
| 175 |
+
如果您想要使用这些数据集,你可以在 [uniem process_zh_datasets](https://github.com/wangyuxinwhy/uniem/blob/main/scripts/process_zh_datasets.py) 中找到加载 huggingface 数据集的脚本,非 huggingface 数据集需要您根据下方提供的链接自行下载和处理。
|
| 176 |
+
|
| 177 |
+
| 数据集名称 | 领域 | 数量 | 任务类型 | Prompt | 质量 | 数据提供者 | 说明 | 是否开源/研究使用 | 是否商用 | 脚本 | Done | URL | 是否同质 |
|
| 178 |
+
| -------------------- | ---- | --------- | ----------------- | ------ | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ----------------- | -------- | ---- | ---- | ------------------------------------------------------------ | -------- |
|
| 179 |
+
| cmrc2018 | 百科 | 14,363 | 问答 | 问答 | 优 | Yiming Cui, Ting Liu, Wanxiang Che, Li Xiao, Zhipeng Chen, Wentao Ma, Shijin Wang, Guoping Hu | https://github.com/ymcui/cmrc2018/blob/master/README_CN.md 专家标注的基于维基百科的中文阅读理解数据集,将问题和上下文视为正例 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/cmrc2018 | 否 |
|
| 180 |
+
| belle_2m | 百科 | 2,000,000 | 指令微调 | 无 | 优 | LianjiaTech/BELLE | belle 的指令微调数据集,使用 self instruct 方法基于 gpt3.5 生成 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/BelleGroup/train_2M_CN | 否 |
|
| 181 |
+
| firefily | 百科 | 1,649,399 | 指令微调 | 无 | 优 | YeungNLP | Firefly(流萤) 是一个开源的中文对话式大语言模型,使用指令微调(Instruction Tuning)在中文数据集上进行调优。使用了词表裁剪、ZeRO等技术,有效降低显存消耗和提高训练效率。 在训练中,我们使用了更小的模型参数量,以及更少的计算资源。 | 未说明 | 未说明 | 是 | 是 | https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M | 否 |
|
| 182 |
+
| alpaca_gpt4 | 百科 | 48,818 | 指令微调 | 无 | 优 | Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, Jianfeng Gao | 本数据集是参考Alpaca方法基于GPT4得到的self-instruct数据,约5万条。 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/shibing624/alpaca-zh | 否 |
|
| 183 |
+
| zhihu_kol | 百科 | 1,006,218 | 问答 | 问答 | 优 | wangrui6 | 知乎问答 | 未说明 | 未说明 | 是 | 是 | https://huggingface.co/datasets/wangrui6/Zhihu-KOL | 否 |
|
| 184 |
+
| hc3_chinese | 百科 | 39,781 | 问答 | 问答 | 良 | Hello-SimpleAI | 问答数据,包括人工回答和 GPT 回答 | 是 | 未说明 | 是 | 是 | https://huggingface.co/datasets/Hello-SimpleAI/HC3-Chinese | 否 |
|
| 185 |
+
| amazon_reviews_multi | 电商 | 210,000 | 问答 文本分类 | 摘要 | 优 | 亚马逊 | 亚马逊产品评论数据集 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/amazon_reviews_multi/viewer/zh/train?row=8 | 否 |
|
| 186 |
+
| mlqa | 百科 | 85,853 | 问答 | 问答 | 良 | patrickvonplaten | 一个用于评估跨语言问答性能的基准数据集 | 是 | 未说明 | 是 | 是 | https://huggingface.co/datasets/mlqa/viewer/mlqa-translate-train.zh/train?p=2 | 否 |
|
| 187 |
+
| xlsum | 新闻 | 93,404 | 摘要 | 摘要 | 良 | BUET CSE NLP Group | BBC的专业注释文章摘要对 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/csebuetnlp/xlsum/viewer/chinese_simplified/train?row=259 | 否 |
|
| 188 |
+
| ocnli | 口语 | 17,726 | 自然语言推理 | 推理 | 良 | Thomas Wolf | 自然语言推理数据集 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/clue/viewer/ocnli | 是 |
|
| 189 |
+
| BQ | 金融 | 60,000 | 文本分类 | 相似 | 良 | Intelligent Computing Research Center, Harbin Institute of Technology(Shenzhen) | http://icrc.hitsz.edu.cn/info/1037/1162.htm BQ 语料库包含来自网上银行自定义服务日志的 120,000 个问题对。它分为三部分:100,000 对用于训练,10,000 对用于验证,10,000 对用于测试。 数据提供者: 哈尔滨工业大学(深圳)智能计算研究中心 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/shibing624/nli_zh/viewer/BQ | 是 |
|
| 190 |
+
| lcqmc | 口语 | 149,226 | 文本分类 | 相似 | 良 | Ming Xu | 哈工大文本匹配数据集,LCQMC 是哈尔滨工业大学在自然语言处理国际顶会 COLING2018 构建的问题语义匹配数据集,其目标是判断两个问题的语义是否相同 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/shibing624/nli_zh/viewer/LCQMC/train | 是 |
|
| 191 |
+
| paws-x | 百科 | 23,576 | 文本分类 | 相似 | 优 | Bhavitvya Malik | PAWS Wiki中的示例 | 是 | 是 | 是 | 是 | https://huggingface.co/datasets/paws-x/viewer/zh/train | 是 |
|
| 192 |
+
| wiki_atomic_edit | 百科 | 1,213,780 | 平行语义 | 相似 | 优 | abhishek thakur | 基于中文维基百科的编辑记录收集的数据集 | 未说明 | 未说明 | 是 | 是 | https://huggingface.co/datasets/wiki_atomic_edits | 是 |
|
| 193 |
+
| chatmed_consult | 医药 | 549,326 | 问答 | 问答 | 优 | Wei Zhu | 真实世界的医学相关的问题,使用 gpt3.5 进行回答 | 是 | 否 | 是 | 是 | https://huggingface.co/datasets/michaelwzhu/ChatMed_Consult_Dataset | 否 |
|
| 194 |
+
| webqa | 百科 | 42,216 | 问答 | 问答 | 优 | suolyer | 百度于2016年开源的数据集,数据来自于百度知道;格式为一个问题多篇意思基本一致的文章,分为人为标注以及浏览器检索;数据整体质量中,因为混合了很多检索而来的文章 | 是 | 未说明 | 是 | 是 | https://huggingface.co/datasets/suolyer/webqa/viewer/suolyer--webqa/train?p=3 | 否 |
|
| 195 |
+
| dureader_robust | 百科 | 65,937 | 机器阅读理解 问答 | 问答 | 优 | 百度 | DuReader robust旨在利用真实应用中的数据样本来衡量阅读理解模型的鲁棒性,评测模型的过敏感性、过稳定性以及泛化能力,是首个中文阅读理解鲁棒性数据集。 | 是 | 是 | 是 | 是 | https://huggingface.co/datasets/PaddlePaddle/dureader_robust/viewer/plain_text/train?row=96 | 否 |
|
| 196 |
+
| csl | 学术 | 395,927 | 语料 | 摘要 | 优 | Yudong Li, Yuqing Zhang, Zhe Zhao, Linlin Shen, Weijie Liu, Weiquan Mao and Hui Zhang | 提供首个中文科学文献数据集(CSL),包含 396,209 篇中文核心期刊论文元信息 (标题、摘要、关键词、学科、门类)。CSL 数据集可以作为预训练语料,也可以构建许多NLP任务,例如文本摘要(标题预测)、 关键词生成和文本分类等。 | 是 | 是 | 是 | 是 | https://huggingface.co/datasets/neuclir/csl | 否 |
|
| 197 |
+
| miracl-corpus | 百科 | 4,934,368 | 语料 | 摘要 | 优 | MIRACL | The corpus for each language is prepared from a Wikipedia dump, where we keep only the plain text and discard images, tables, etc. Each article is segmented into multiple passages using WikiExtractor based on natural discourse units (e.g., \n\n in the wiki markup). Each of these passages comprises a ""document"" or unit of retrieval. We preserve the Wikipedia article title of each passage. | 是 | 是 | 是 | 是 | https://huggingface.co/datasets/miracl/miracl-corpus | 否 |
|
| 198 |
+
| lawzhidao | 法律 | 36,368 | 问答 | 问答 | 优 | 和鲸社区-Ustinian | 百度知道清洗后的法律问答 | 是 | 是 | 否 | 是 | https://www.heywhale.com/mw/dataset/5e953ca8e7ec38002d02fca7/content | 否 |
|
| 199 |
+
| CINLID | 成语 | 34,746 | 平行语义 | 相似 | 优 | 高长宽 | 中文成语语义推理数据集(Chinese Idioms Natural Language Inference Dataset)收集了106832条由人工撰写的成语对(含少量歇后语、俗语等短文本),通过人工标注的方式进行平衡分类,标签为entailment、contradiction和neutral,支持自然语言推理(NLI)的任务。 | 是 | 否 | 否 | 是 | https://www.luge.ai/#/luge/dataDetail?id=39 | 是 |
|
| 200 |
+
| DuSQL | SQL | 25,003 | NL2SQL | SQL | 优 | 百度 | DuSQL是一个面向实际应用的数据集,包含200个数据库,覆盖了164个领域,问题覆盖了匹配、计算、推理等实际应用中常见形式。该数据集更贴近真实应用场景,要求模型领域无关、问题无关,且具备计算推理等能力。 | 是 | 否 | 否 | 是 | https://www.luge.ai/#/luge/dataDetail?id=13 | 否 |
|
| 201 |
+
| Zhuiyi-NL2SQL | SQL | 45,918 | NL2SQL | SQL | 优 | 追一科技 刘云峰 | NL2SQL是一个多领域的简单数据集,其主要包含匹配类型问题。该数据集主要验证模型的泛化能力,其要求模型具有较强的领域泛化能力、问题泛化能力。 | 是 | 否 | 否 | 是 | https://www.luge.ai/#/luge/dataDetail?id=12 | 否 |
|
| 202 |
+
| Cspider | SQL | 7,785 | NL2SQL | SQL | 优 | 西湖大学 张岳 | CSpider是一个多语言数据集,其问题以中文表达,数据库以英文存储,这种双语模式在实际应用中也非常常见,尤其是数据库引擎对中文支持不好的情况下。该数据集要求模型领域无关、问题无关,且能够实现多语言匹配。 | 是 | 否 | 否 | 是 | https://www.luge.ai/#/luge/dataDetail?id=11 | 否 |
|
| 203 |
+
| news2016zh | 新闻 | 2,507,549 | 语料 | 摘要 | 良 | Bright Xu | 包含了250万篇新闻。新闻来源涵盖了6.3万个媒体,含标题、关键词、描述、正文。 | 是 | 是 | 否 | 是 | https://github.com/brightmart/nlp_chinese_corpus | 否 |
|
| 204 |
+
| baike2018qa | 百科 | 1,470,142 | 问答 | 问答 | 良 | Bright Xu | 含有150万个预先过滤过的、高质量问题和答案,每个问题属于一个类别。总共有492个类别,其中频率达到或超过10次的类别有434个。 | 是 | 是 | 否 | 是 | https://github.com/brightmart/nlp_chinese_corpus | 否 |
|
| 205 |
+
| webtext2019zh | 百科 | 4,258,310 | 问答 | 问答 | 优 | Bright Xu | 含有410万个预先过滤过的、高质量问题和回复。每个问题属于一个【话题】,总共有2.8万个各式话题,话题包罗万象。 | 是 | 是 | 否 | 是 | https://github.com/brightmart/nlp_chinese_corpus | 否 |
|
| 206 |
+
| SimCLUE | 百科 | 775,593 | 平行语义 | 相似 | 良 | 数据集合,请在 simCLUE 中查看 | 整合了中文领域绝大多数可用的开源的语义相似度和自然语言推理的数据集,并重新做了数据拆分和整理。 | 是 | 否 | 否 | 是 | https://github.com/CLUEbenchmark/SimCLUE | 是 |
|
| 207 |
+
| Chinese-SQuAD | 新闻 | 76,449 | 机器阅读理解 | 问答 | 优 | junzeng-pluto | 中文机器阅读理解数据集,通过机器翻译加人工校正的方式从原始Squad转换而来 | 是 | 否 | 否 | 是 | https://github.com/pluto-junzeng/ChineseSquad | 否 |
|
| 208 |
+
|
| 209 |
+
## 🗓️ 计划表
|
| 210 |
+
|
| 211 |
+
- [x] 完成 MTEB 中文评测 BenchMark, [MTEB-zh](https://github.com/wangyuxinwhy/uniem/tree/main/mteb-zh)
|
| 212 |
+
- [x] 完成 Large 模型的训练和开源
|
| 213 |
+
- [x] 完成 Finetuner ,允许更优雅的微调
|
| 214 |
+
- [ ] 完成支持代码检索的模型
|
| 215 |
+
- [ ] 对 M3E 数据集进行清洗,保留高质量的部分,组成 m3e-hq,并在 huggingface 上开源
|
| 216 |
+
- [ ] 在 m3e-hq 的数据集上补充 hard negative 的样本及相似度分数,组成 m3e-hq-with-score,并在 huggingface 上开源
|
| 217 |
+
- [ ] 在 m3e-hq-with-score 上通过 [cosent loss](https://github.com/wangyuxinwhy/uniem/blob/main/uniem/criteria.py#LL24C39-L24C39) loss 进行训练并开源模型,CoSent 原理参考这篇[博客](https://kexue.fm/archives/8847)
|
| 218 |
+
- [ ] 开源商用版本的 M3E models
|
| 219 |
+
|
| 220 |
+
## 🙏 致谢
|
| 221 |
+
|
| 222 |
+
感谢开源社区提供的中文语料,感谢所有在此工作中提供帮助的人们,希望中文社区越来越好,共勉!
|
| 223 |
+
|
| 224 |
+
## 📜 License
|
| 225 |
+
|
| 226 |
+
M3E models 使用的数据集中包括大量非商用的数据集,所以 M3E models 也是非商用的,仅供研究使用。不过我们已经在 M3E 数据集上标识了商用和非商用的数据集,您可以根据自己的需求自行训练。
|
| 227 |
+
|
| 228 |
+
## Citation
|
| 229 |
+
Please cite this model using the following format:
|
| 230 |
+
```
|
| 231 |
+
@software {Moka Massive Mixed Embedding,
|
| 232 |
+
author = {Wang Yuxin,Sun Qingxuan,He sicheng},
|
| 233 |
+
title = {M3E: Moka Massive Mixed Embedding Model},
|
| 234 |
+
year = {2023}
|
| 235 |
+
}
|
| 236 |
+
```","{""id"": ""moka-ai/m3e-base"", ""author"": ""moka-ai"", ""sha"": ""764b537a0e50e5c7d64db883f2d2e051cbe3c64c"", ""last_modified"": ""2023-07-14 02:29:36+00:00"", ""created_at"": ""2023-06-06 02:28:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 148798, ""downloads_all_time"": null, ""likes"": 941, ""library_name"": ""sentence-transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""sentence-transformers"", ""pytorch"", ""safetensors"", ""bert"", ""embedding"", ""text-embedding"", ""zh"", ""en"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlibrary_name: sentence-transformers\ntags:\n- embedding\n- text-embedding"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BertModel""], ""model_type"": ""bert"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='1_Pooling/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modules.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sentence_bert_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mteb/leaderboard"", ""mteb/leaderboard_legacy"", ""Thun09/leaderboard_demo"", ""justest/embeddings-api"", ""Zulelee/langchain-chatchat"", ""abidlabs/mteb-leaderboard"", ""y001j/ChatGLM"", ""XuBailing/CongMa"", ""XuBailing/CongMa2"", ""dengkane/learn-streamlit"", ""sumandeng/xrundai"", ""justest/embeddings-api-ernie"", ""Beuys/chatbot"", ""hyattDD/WordsApp"", ""cosco/chat_with_langchain"", ""sq66/leaderboard_legacy"", ""SmileXing/leaderboard"", ""q275343119/leaderboard""], ""safetensors"": {""parameters"": {""I64"": 512, ""F32"": 102267648}, ""total"": 102268160}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-07-14 02:29:36+00:00"", ""cardData"": ""language:\n- zh\n- en\nlibrary_name: sentence-transformers\ntags:\n- embedding\n- text-embedding"", ""transformersInfo"": null, ""_id"": ""647e99df10b7a3b157196811"", ""modelId"": ""moka-ai/m3e-base"", ""usedStorage"": 818238909}",0,,0,,0,"https://huggingface.co/mradermacher/m3e-base-GGUF, https://huggingface.co/mradermacher/m3e-base-i1-GGUF",2,,0,"SmileXing/leaderboard, Thun09/leaderboard_demo, XuBailing/CongMa, XuBailing/CongMa2, Zulelee/langchain-chatchat, abidlabs/mteb-leaderboard, 
huggingface/InferenceSupport/discussions/new?title=moka-ai/m3e-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmoka-ai%2Fm3e-base%5D(%2Fmoka-ai%2Fm3e-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, justest/embeddings-api, mteb/leaderboard, mteb/leaderboard_legacy, q275343119/leaderboard, sq66/leaderboard_legacy, y001j/ChatGLM",13
miqu-1-70b-sf_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv
ADDED
The diff for this file is too large to render.
See raw diff