Schema (min/max are string lengths, or value ranges for numeric fields):

| field | type | min | max |
| --- | --- | --- | --- |
| modelId | string | 4 | 111 |
| lastModified | string | 24 | 24 |
| tags | list | | |
| pipeline_tag | string | 5 | 30 |
| author | string | 2 | 34 |
| config | null | | |
| securityStatus | null | | |
| id | string | 4 | 111 |
| likes | int64 | 0 | 9.53k |
| downloads | int64 | 2 | 73.6M |
| library_name | string | 2 | 84 |
| created | timestamp[us] | | |
| card | string | 101 | 901k |
| card_len | int64 | 101 | 901k |
| embeddings | list | | |
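The records below follow this schema, one field per line, with each `card` field flattened to a single line. As a rough sketch of how a dump like this is consumed, assuming it was exported from a Hugging Face dataset (the dataset id below is a placeholder, not one named in this document):

```python
# Minimal sketch of loading rows with the schema above via the `datasets`
# library. "user/model-cards-with-embeddings" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("user/model-cards-with-embeddings", split="train")
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
# The matching ranges above suggest card_len simply mirrors len(card).
print(row["card_len"], len(row["card"]))
```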
laion/CLIP-ViT-B-16-DataComp.L-s1B-b8K
2023-04-26T01:37:53.000Z
[ "open_clip", "zero-shot-image-classification", "clip", "license:mit", "region:us", "has_space" ]
zero-shot-image-classification
laion
null
null
laion/CLIP-ViT-B-16-DataComp.L-s1B-b8K
0
2,042
open_clip
2023-04-26T01:37:39
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-16-DataComp.L-s1B-b8K
141
[ [ -0.0188140869140625, -0.023468017578125, 0.0176239013671875, 0.025421142578125, -0.0726318359375, 0.01172637939453125, 0.0550537109375, 0.0231170654296875, 0.049407958984375, 0.033782958984375, -0.073486328125, -0.034881591796875, -0.0227203369140625, -0.004...
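The card above is only a title, so as a hedged illustration, here is a minimal zero-shot classification sketch assuming the loading pattern the open_clip library documents for hf-hub checkpoints; the image path and labels are placeholders.

```python
# Sketch: zero-shot image classification with this checkpoint through open_clip.
import torch
from PIL import Image
import open_clip

repo = "hf-hub:laion/CLIP-ViT-B-16-DataComp.L-s1B-b8K"
model, _, preprocess = open_clip.create_model_and_transforms(repo)
tokenizer = open_clip.get_tokenizer(repo)

image = preprocess(Image.open("cat.jpg")).unsqueeze(0)  # placeholder image path
text = tokenizer(["a photo of a cat", "a photo of a dog"])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Cosine similarity over L2-normalized embeddings, softmaxed into label probabilities
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
print(probs)
```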
stablediffusionapi/all-526-animated
2023-04-26T20:32:40.000Z
[ "diffusers", "stablediffusionapi.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
stablediffusionapi
null
null
stablediffusionapi/all-526-animated
2
2,042
diffusers
2023-04-26T20:30:32
--- license: creativeml-openrail-m tags: - stablediffusionapi.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # All 526 Animated API Inference ![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/7598750091682540995.png) ## Get API Key Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed. Replace Key in below code, change **model_id** to "all-526-animated" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs) Model link: [View model](https://stablediffusionapi.com/models/all-526-animated) Credits: [View credits](https://civitai.com/?query=All%20526%20Animated) View all models: [View Models](https://stablediffusionapi.com/models) import requests import json url = "https://stablediffusionapi.com/api/v3/dreambooth" payload = json.dumps({ "key": "", "model_id": "all-526-animated", "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(response.text) > Use this coupon code to get 25% off **DMGG0RBN**
2,437
[ [ -0.02850341796875, -0.048583984375, 0.0289306640625, 0.029937744140625, -0.031402587890625, 0.0078125, 0.0228424072265625, -0.0259246826171875, 0.042633056640625, 0.03668212890625, -0.074462890625, -0.057525634765625, -0.0298614501953125, -0.0016441345214843...
Yntec/DucHaiten-Retro-Diffusers
2023-07-13T13:39:06.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "Retro", "DucHaiten", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Yntec
null
null
Yntec/DucHaiten-Retro-Diffusers
1
2,040
diffusers
2023-07-13T13:02:56
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image tags: - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image - Retro - DucHaiten --- # DucHaiten Retro I don't know about you, but in my opinion this is the best retro model DucHaiten has ever created. It's sad to see it sitting at 0 downloads at huggingface, so here's a Diffusers version you can use with huggingface's pipeline! If you like their content, support them at: https://linktr.ee/Duc_Haiten Original page: https://civitai.com/models/103966?modelVersionId=111392
586
[ [ -0.0653076171875, -0.029144287109375, 0.0347900390625, 0.037261962890625, -0.04425048828125, -0.02178955078125, 0.0210723876953125, -0.01739501953125, 0.058135986328125, 0.0167236328125, -0.057098388671875, -0.03460693359375, -0.047576904296875, 0.0012683868...
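The card above points at the Diffusers pipeline without showing code; a minimal sketch, assuming the standard `StableDiffusionPipeline` loading path and a CUDA device (the prompt is illustrative):

```python
# Sketch: text-to-image with the Diffusers conversion described in the card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Yntec/DucHaiten-Retro-Diffusers", torch_dtype=torch.float16
).to("cuda")

image = pipe("retro style portrait of a smiling woman, 1950s poster art").images[0]
image.save("retro.png")
```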
Daniil-plotnikov/deepvision
2023-10-09T14:46:49.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "en", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Daniil-plotnikov
null
null
Daniil-plotnikov/deepvision
2
2,038
diffusers
2023-10-09T14:11:53
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion language: - en --- ### DeepVision - good universal model
132
[ [ -0.01461029052734375, 0.0001569986343383789, 0.037994384765625, 0.0282745361328125, -0.039764404296875, -0.0151824951171875, 0.0178070068359375, 0.00665283203125, -0.00902557373046875, 0.0723876953125, -0.00836181640625, -0.052276611328125, -0.032470703125, ...
kyujinpy/KO-Platypus2-13B
2023-10-19T13:29:09.000Z
[ "transformers", "pytorch", "llama", "text-generation", "en", "ko", "dataset:kyujinpy/KOpen-platypus", "license:cc-by-nc-sa-4.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
kyujinpy
null
null
kyujinpy/KO-Platypus2-13B
5
2,037
transformers
2023-08-20T12:55:06
--- language: - en - ko datasets: - kyujinpy/KOpen-platypus library_name: transformers pipeline_tag: text-generation license: cc-by-nc-sa-4.0 --- **This model was developed by the LLM research consortium of (주)미디어그룹사람과숲 and (주)마커.** **The license is `cc-by-nc-sa-4.0`.** # **Ko-Platypus2-13B** ![KO-Platypus2-13B](./KO_platypus.png) ## Model Details **More details (GitHub): [KO-Platypus](https://github.com/Marker-Inc-Korea/KO-Platypus)** **Model Developers** Kyujin Han (kyujinpy) **Input** Models input text only. **Output** Models generate text only. **Model Architecture** KO-Platypus2-13B is an auto-regressive language model based on the LLaMA2 transformer architecture. **Base Model** [hyunseoki/ko-en-llama2-13b](https://huggingface.co/hyunseoki/ko-en-llama2-13b) **Training Dataset** I used [KOpen-platypus](https://huggingface.co/datasets/kyujinpy/KOpen-platypus), a high-quality Korean translation of [open-platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus). Training was done on a single A100 40GB GPU on Colab. # **Model Benchmark** ## KO-LLM leaderboard - Follow-up at [Open KO-LLM LeaderBoard](https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard). ![img](./leaderboard.png) | Model | Average | Ko-ARC | Ko-HellaSwag | Ko-MMLU | Ko-TruthfulQA | Ko-CommonGen V2 | | --- | --- | --- | --- | --- | --- | --- | | KO-Platypus2-13B (ours) | 47.90 | 44.20 | 54.31 | 42.47 | 44.41 | 54.11 | | [hyunseoki/ko-en-llama2-13b](https://huggingface.co/hyunseoki/ko-en-llama2-13b) | 46.68 | 42.15 | 54.23 | 38.90 | 40.74 | 57.39 | | [MarkrAI/kyujin-CoTy-platypus-ko-12.8b](https://huggingface.co/MarkrAI/kyujin-CoTy-platypus-ko-12.8b) | 46.44 | 34.98 | 49.11 | 25.68 | 37.59 | 84.86 | | [momo/polyglot-ko-12.8b-Chat-QLoRA-Merge](https://huggingface.co/momo/polyglot-ko-12.8b-Chat-QLoRA-Merge) | 45.71 | 35.49 | 49.93 | 25.97 | 39.43 | 77.70 | | [KoT-platypus2-7B](https://huggingface.co/kyujinpy/KoT-platypus2-7B) | 45.62 | 38.05 | 49.63 | 34.68 | 37.69 | 68.08 | > Comparison with the top 4 SOTA models. (update: 10/06) --- # Implementation Code ```python ### KO-Platypus from transformers import AutoModelForCausalLM, AutoTokenizer import torch repo = "kyujinpy/KO-Platypus2-13B" ko_platypus = AutoModelForCausalLM.from_pretrained( repo, return_dict=True, torch_dtype=torch.float16, device_map='auto' ) ko_platypus_tokenizer = AutoTokenizer.from_pretrained(repo) ``` > Readme format: [kyujinpy/KoT-platypus2-7B](https://huggingface.co/kyujinpy/KoT-platypus2-7B) ---
2,531
[ [ -0.036041259765625, -0.0418701171875, 0.0243988037109375, 0.03814697265625, -0.039520263671875, 0.0029315948486328125, -0.01432037353515625, -0.027099609375, 0.0180511474609375, 0.0238189697265625, -0.04498291015625, -0.049957275390625, -0.0478515625, 0.0034...
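The implementation code in the card above stops after loading the model and tokenizer; a hedged sketch of actually generating text with it (the prompt and sampling settings are illustrative, not taken from the card, and a 13B fp16 model needs roughly 26 GB of GPU memory):

```python
# Sketch: end-to-end generation with KO-Platypus2-13B.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "kyujinpy/KO-Platypus2-13B"
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(repo)

# Illustrative Korean prompt: "Where is the capital of Korea?"
inputs = tokenizer("한국의 수도는 어디인가요?", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.9)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```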
camembert/camembert-base
2022-06-17T23:06:40.000Z
[ "transformers", "pytorch", "camembert", "fill-mask", "fr", "arxiv:1911.03894", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
camembert
null
null
camembert/camembert-base
5
2,035
transformers
2022-03-02T23:29:05
--- language: fr --- # CamemBERT: a Tasty French Language Model ## Introduction [CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model. It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data and pretraining data source domains. For further information or requests, please go to the [Camembert Website](https://camembert-model.fr/) ## Pre-trained models | Model | #params | Arch. | Training data | |---|---|---|---| | `camembert-base` | 110M | Base | OSCAR (138 GB of text) | | `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) | | `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) | | `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) | | `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) | | `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) | ## How to use CamemBERT with HuggingFace ##### Load CamemBERT and its sub-word tokenizer: ```python from transformers import CamembertModel, CamembertTokenizer # You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large". tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-wikipedia-4gb") camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb") camembert.eval() # disable dropout (or leave in train mode to finetune) ``` ##### Filling masks using pipeline ```python from transformers import pipeline camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-wikipedia-4gb", tokenizer="camembert/camembert-base-wikipedia-4gb") results = camembert_fill_mask("Le camembert est un fromage de <mask>!") # results #[{'sequence': '<s> Le camembert est un fromage de chèvre!</s>', 'score': 0.4937814474105835, 'token': 19370}, #{'sequence': '<s> Le camembert est un fromage de brebis!</s>', 'score': 0.06255942583084106, 'token': 30616}, #{'sequence': '<s> Le camembert est un fromage de montagne!</s>', 'score': 0.04340197145938873, 'token': 2364}, # {'sequence': '<s> Le camembert est un fromage de Noël!</s>', 'score': 0.02823255956172943, 'token': 3236}, #{'sequence': '<s> Le camembert est un fromage de vache!</s>', 'score': 0.021357402205467224, 'token': 12329}] ``` ##### Extract contextual embedding features from Camembert output ```python import torch # Tokenize in sub-words with SentencePiece tokenized_sentence = tokenizer.tokenize("J'aime le camembert !") # ['▁J', "'", 'aime', '▁le', '▁ca', 'member', 't', '▁!'] # Encode as token ids and add the special start and end tokens encoded_sentence = tokenizer.encode(tokenized_sentence) # [5, 221, 10, 10600, 14, 8952, 10540, 75, 1114, 6] # NB: Can be done in one step: tokenizer.encode("J'aime le camembert !") # Feed tokens to Camembert as a torch tensor (batch dim 1) encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0) embeddings, _ = camembert(encoded_sentence) # embeddings.detach() # embeddings.size torch.Size([1, 10, 768]) #tensor([[[-0.0928, 0.0506, -0.0094, ..., -0.2388, 0.1177, -0.1302], # [ 0.0662, 0.1030, -0.2355, ..., -0.4224, -0.0574, -0.2802], # [-0.0729, 0.0547, 0.0192, ..., -0.1743, 0.0998, -0.2677], # ..., ``` ##### Extract contextual embedding features from all Camembert layers ```python from transformers import CamembertConfig # (Need to reload the model with new config) config = CamembertConfig.from_pretrained("camembert/camembert-base-wikipedia-4gb", output_hidden_states=True) camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb", config=config) embeddings, _, all_layer_embeddings = camembert(encoded_sentence) # all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers) all_layer_embeddings[5] # layer 5 contextual embedding: size torch.Size([1, 10, 768]) #tensor([[[-0.0059, -0.0227, 0.0065, ..., -0.0770, 0.0369, 0.0095], # [ 0.2838, -0.1531, -0.3642, ..., -0.0027, -0.8502, -0.7914], # [-0.0073, -0.0338, -0.0011, ..., 0.0533, -0.0250, -0.0061], # ..., ``` ## Authors CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. ## Citation If you use our work, please cite: ```bibtex @inproceedings{martin2020camembert, title={CamemBERT: a Tasty French Language Model}, author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t}, booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics}, year={2020} } ```
5,209
[ [ -0.0155029296875, -0.06573486328125, 0.0222320556640625, 0.0240325927734375, -0.0145721435546875, -0.00998687744140625, -0.031005859375, -0.002349853515625, 0.03826904296875, 0.029266357421875, -0.04058837890625, -0.04833984375, -0.0465087890625, 0.002222061...
declare-lab/flan-alpaca-xxl
2023-07-04T23:45:33.000Z
[ "transformers", "pytorch", "t5", "text2text-generation", "dataset:tatsu-lab/alpaca", "arxiv:2306.04757", "arxiv:2210.11416", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
declare-lab
null
null
declare-lab/flan-alpaca-xxl
35
2,034
transformers
2023-03-28T13:19:10
--- license: apache-2.0 datasets: - tatsu-lab/alpaca --- ## 🍮 🦙 Flan-Alpaca: Instruction Tuning from Humans and Machines 📣 We developed Flacuna by fine-tuning Vicuna-13B on the Flan collection. Flacuna is better than Vicuna at problem-solving. Access the model here https://huggingface.co/declare-lab/flacuna-13b-v1.0. 📣 Curious to know the performance of 🍮 🦙 **Flan-Alpaca** on the large-scale LLM evaluation benchmark **InstructEval**? Read our paper [https://arxiv.org/pdf/2306.04757.pdf](https://arxiv.org/pdf/2306.04757.pdf). We evaluated more than 10 open-source instruction-tuned LLMs belonging to various LLM families including Pythia, LLaMA, T5, UL2, OPT, and Mosaic. Codes and datasets: [https://github.com/declare-lab/instruct-eval](https://github.com/declare-lab/instruct-eval) 📣 **FLAN-T5** is also useful in text-to-audio generation. Find our work at [https://github.com/declare-lab/tango](https://github.com/declare-lab/tango) if you are interested. Our [repository](https://github.com/declare-lab/flan-alpaca) contains code for extending the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) synthetic instruction tuning to existing instruction-tuned models such as [Flan-T5](https://arxiv.org/abs/2210.11416). We have a [live interactive demo](https://huggingface.co/spaces/joaogante/transformers_streaming) thanks to [Joao Gante](https://huggingface.co/joaogante)! We are also benchmarking many instruction-tuned models at [declare-lab/flan-eval](https://github.com/declare-lab/flan-eval). Our pretrained models are fully available on HuggingFace 🤗: | Model | Parameters | Instruction Data | Training GPUs | |---|---|---|---| | [Flan-Alpaca-Base](https://huggingface.co/declare-lab/flan-alpaca-base) | 220M | [Flan](https://github.com/google-research/FLAN), [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) | 1x A6000 | | [Flan-Alpaca-Large](https://huggingface.co/declare-lab/flan-alpaca-large) | 770M | [Flan](https://github.com/google-research/FLAN), [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) | 1x A6000 | | [Flan-Alpaca-XL](https://huggingface.co/declare-lab/flan-alpaca-xl) | 3B | [Flan](https://github.com/google-research/FLAN), [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) | 1x A6000 | | [Flan-Alpaca-XXL](https://huggingface.co/declare-lab/flan-alpaca-xxl) | 11B | [Flan](https://github.com/google-research/FLAN), [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) | 4x A6000 (FSDP) | | [Flan-GPT4All-XL](https://huggingface.co/declare-lab/flan-gpt4all-xl) | 3B | [Flan](https://github.com/google-research/FLAN), [GPT4All](https://github.com/nomic-ai/gpt4all) | 1x A6000 | | [Flan-ShareGPT-XL](https://huggingface.co/declare-lab/flan-sharegpt-xl) | 3B | [Flan](https://github.com/google-research/FLAN), [ShareGPT](https://github.com/domeccleston/sharegpt)/[Vicuna](https://github.com/lm-sys/FastChat) | 1x A6000 | | [Flan-Alpaca-GPT4-XL*](https://huggingface.co/declare-lab/flan-alpaca-gpt4-xl) | 3B | [Flan](https://github.com/google-research/FLAN), [GPT4-Alpaca](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) | 1x A6000 | *recommended for better performance ### Why? [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) represents an exciting new direction to approximate the performance of large language models (LLMs) like ChatGPT cheaply and easily. Concretely, they leverage an LLM such as GPT-3 to generate instructions as synthetic training data. The synthetic data, which covers more than 50k tasks, can then be used to finetune a smaller model. However, the original implementation is less accessible due to licensing constraints of the underlying [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) model. Furthermore, users have noted [potential noise](https://github.com/tloen/alpaca-lora/issues/65) in the synthetic dataset. Hence, it may be better to explore a fully accessible model that is already trained on high-quality (but less diverse) instructions such as [Flan-T5](https://arxiv.org/abs/2210.11416). ### Usage ```python from transformers import pipeline prompt = "Write an email about an alpaca that likes flan" model = pipeline(model="declare-lab/flan-alpaca-gpt4-xl") model(prompt, max_length=128, do_sample=True) # Dear AlpacaFriend, # My name is Alpaca and I'm 10 years old. # I'm excited to announce that I'm a big fan of flan! # We like to eat it as a snack and I believe that it can help with our overall growth. # I'd love to hear your feedback on this idea. # Have a great day! # Best, AL Paca ```
5,408
[ [ -0.052276611328125, -0.062744140625, 0.0227813720703125, 0.018218994140625, -0.00241851806640625, -0.00006240606307983398, -0.022247314453125, -0.055999755859375, 0.03375244140625, 0.01424407958984375, -0.034454345703125, -0.041717529296875, -0.042083740234375, ...
komfysach/groow-tokens-2
2023-10-31T11:01:22.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
komfysach
null
null
komfysach/groow-tokens-2
0
2,034
diffusers
2023-10-31T10:55:27
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### groow_tokens_2 Dreambooth model trained by komfysach with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
505
[ [ -0.021728515625, -0.06744384765625, 0.044830322265625, 0.0242462158203125, -0.0325927734375, 0.03192138671875, 0.0297393798828125, -0.0179290771484375, 0.04791259765625, 0.015716552734375, -0.015350341796875, -0.0224456787109375, -0.05157470703125, -0.025115...
Helsinki-NLP/opus-tatoeba-en-tr
2023-08-16T12:09:37.000Z
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "en", "tr", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-tatoeba-en-tr
5
2,033
transformers
2022-03-02T23:29:04
--- language: - en - tr tags: - translation license: apache-2.0 --- ### en-tr * source group: English * target group: Turkish * OPUS readme: [eng-tur](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-tur/README.md) * model: transformer-align * source language(s): eng * target language(s): tur * model: transformer-align * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus+bt-2021-04-10.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-tur/opus+bt-2021-04-10.zip) * test set translations: [opus+bt-2021-04-10.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-tur/opus+bt-2021-04-10.test.txt) * test set scores: [opus+bt-2021-04-10.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-tur/opus+bt-2021-04-10.eval.txt) ## Benchmarks | testset | BLEU | chr-F | #sent | #words | BP | |---------|-------|-------|-------|--------|----| | newsdev2016-entr.eng-tur | 21.5 | 0.575 | 1001 | 16127 | 1.000 | | newstest2016-entr.eng-tur | 21.4 | 0.558 | 3000 | 50782 | 0.986 | | newstest2017-entr.eng-tur | 22.8 | 0.572 | 3007 | 51977 | 0.960 | | newstest2018-entr.eng-tur | 20.8 | 0.561 | 3000 | 53731 | 0.963 | | Tatoeba-test.eng-tur | 41.5 | 0.684 | 10000 | 60469 | 0.932 | ### System Info: - hf_name: en-tr - source_languages: eng - target_languages: tur - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-tur/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['en', 'tr'] - src_constituents: ('English', {'eng'}) - tgt_constituents: ('Turkish', {'tur'}) - src_multilingual: False - tgt_multilingual: False - long_pair: eng-tur - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-tur/opus+bt-2021-04-10.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-tur/opus+bt-2021-04-10.test.txt - src_alpha3: eng - tgt_alpha3: tur - chrF2_score: 0.684 - bleu: 41.5 - src_name: English - tgt_name: Turkish - train_date: 2021-04-10 00:00:00 - src_alpha2: en - tgt_alpha2: tr - prefer_old: False - short_pair: en-tr - helsinki_git_sha: a6bd0607aec9603811b2b635aec3f566f3add79d - transformers_git_sha: 12b4d66a80419db30a15e7b9d4208ceb9887c03b - port_machine: LM0-400-22516.local - port_time: 2021-10-05-12:13
2,380
[ [ -0.026397705078125, -0.05438232421875, 0.01708984375, 0.03094482421875, -0.03887939453125, -0.0155487060546875, -0.02655029296875, -0.0283660888671875, 0.025177001953125, 0.017425537109375, -0.0419921875, -0.0596923828125, -0.047393798828125, 0.0291748046875...
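The card above documents the checkpoint but shows no inference code; a minimal sketch using the transformers Marian classes this `marian` checkpoint is served through (the input sentence is illustrative):

```python
# Sketch: English-to-Turkish translation with this checkpoint.
from transformers import MarianMTModel, MarianTokenizer

repo = "Helsinki-NLP/opus-tatoeba-en-tr"
tokenizer = MarianTokenizer.from_pretrained(repo)
model = MarianMTModel.from_pretrained(repo)

batch = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```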
microsoft/speecht5_asr
2023-03-22T20:10:20.000Z
[ "transformers", "pytorch", "speecht5", "automatic-speech-recognition", "audio", "dataset:librispeech_asr", "arxiv:2110.07205", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
microsoft
null
null
microsoft/speecht5_asr
19
2,032
transformers
2023-02-02T12:46:15
--- license: mit tags: - audio - automatic-speech-recognition datasets: - librispeech_asr --- # SpeechT5 (ASR task) SpeechT5 model fine-tuned for automatic speech recognition (speech-to-text) on LibriSpeech. This model was introduced in [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei. SpeechT5 was first released in [this repository](https://github.com/microsoft/SpeechT5/), [original weights](https://huggingface.co/ajyy/SpeechT5/). The license used is [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE). Disclaimer: The team releasing SpeechT5 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model Description Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder. Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification. ## Intended Uses & Limitations You can use this model for automatic speech recognition. See the [model hub](https://huggingface.co/models?search=speecht5) to look for fine-tuned versions on a task that interests you. Currently, both the feature extractor and model support PyTorch. ## Citation **BibTeX:** ```bibtex @inproceedings{ao-etal-2022-speecht5, title = {{S}peech{T}5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing}, author = {Ao, Junyi and Wang, Rui and Zhou, Long and Wang, Chengyi and Ren, Shuo and Wu, Yu and Liu, Shujie and Ko, Tom and Li, Qing and Zhang, Yu and Wei, Zhihua and Qian, Yao and Li, Jinyu and Wei, Furu}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {May}, year = {2022}, pages={5723--5738}, } ``` ## How to Get Started With the Model Use the code below to convert a mono 16 kHz speech waveform to text. ```python from transformers import SpeechT5Processor, SpeechT5ForSpeechToText from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate example_speech = dataset[0]["audio"]["array"] processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr") model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") inputs = processor(audio=example_speech, sampling_rate=sampling_rate, return_tensors="pt") predicted_ids = model.generate(**inputs, max_length=100) transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) print(transcription[0]) ```
3,981
[ [ -0.02471923828125, -0.040863037109375, 0.01079559326171875, 0.01080322265625, -0.0275115966796875, -0.01519012451171875, -0.0274505615234375, -0.035736083984375, -0.01348114013671875, 0.023956298828125, -0.0390625, -0.0396728515625, -0.036590576171875, 0.014...
google/tapas-base
2021-11-29T10:03:33.000Z
[ "transformers", "pytorch", "tf", "tapas", "feature-extraction", "TapasModel", "en", "arxiv:2004.02349", "arxiv:2010.00571", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
feature-extraction
google
null
null
google/tapas-base
6
2,030
transformers
2022-03-02T23:29:05
--- language: en tags: - tapas - TapasModel license: apache-2.0 --- # TAPAS base model This model has 2 versions which can be used. The latest version, which is the default one, corresponds to the `tapas_inter_masklm_base_reset` checkpoint of the [original Github repository](https://github.com/google-research/tapas). This model was pre-trained on MLM and an additional step which the authors call intermediate pre-training. It uses relative position embeddings by default (i.e. resetting the position index at every cell of the table). The other (non-default) version which can be used is the one with absolute position embeddings: - `revision="no_reset"`, which corresponds to `tapas_inter_masklm_base` Disclaimer: The team releasing TAPAS did not write a model card for this model so this model card has been written by the Hugging Face team and contributors. ## Model description TAPAS is a BERT-like transformers model pretrained on a large corpus of English data from Wikipedia in a self-supervised fashion. This means it was pretrained on the raw tables and associated texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a (flattened) table and associated context, the model randomly masks 15% of the words in the input, then runs the entire (partially masked) sequence through the model. The model then has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of a table and associated text. - Intermediate pre-training: to encourage numerical reasoning on tables, the authors additionally pre-trained the model by creating a balanced dataset of millions of syntactically created training examples. Here, the model must predict (classify) whether a sentence is supported or refuted by the contents of a table. The training examples are created based on synthetic as well as counterfactual statements. This way, the model learns an inner representation of the English language used in tables and associated texts, which can then be used to extract features useful for downstream tasks such as answering questions about a table, or determining whether a sentence is entailed or refuted by the contents of a table. Fine-tuning is done by adding one or more classification heads on top of the pre-trained model, and then jointly training these randomly initialized classification heads with the base model on a downstream task. ## Intended uses & limitations You can use the raw model for getting hidden representations of table-question pairs, but it's mostly intended to be fine-tuned on a downstream task such as question answering or sequence classification. See the [model hub](https://huggingface.co/models?filter=tapas) to look for fine-tuned versions on a task that interests you. ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence [SEP] Flattened table [SEP] ``` ### Pre-training The model was pre-trained on 32 Cloud TPU v3 cores for 1,000,000 steps with maximum sequence length 512 and batch size of 512. In this setup, pre-training on MLM only takes around 3 days. Additionally, the model has been further pre-trained on a second task (table entailment). See the original TAPAS [paper](https://www.aclweb.org/anthology/2020.acl-main.398/) and the [follow-up paper](https://www.aclweb.org/anthology/2020.findings-emnlp.27/) for more details. The optimizer used is Adam with a learning rate of 5e-5, and a warmup ratio of 0.01. ### BibTeX entry and citation info ```bibtex @misc{herzig2020tapas, title={TAPAS: Weakly Supervised Table Parsing via Pre-training}, author={Jonathan Herzig and Paweł Krzysztof Nowak and Thomas Müller and Francesco Piccinno and Julian Martin Eisenschlos}, year={2020}, eprint={2004.02349}, archivePrefix={arXiv}, primaryClass={cs.IR} } ``` ```bibtex @misc{eisenschlos2020understanding, title={Understanding tables with intermediate pre-training}, author={Julian Martin Eisenschlos and Syrine Krichene and Thomas Müller}, year={2020}, eprint={2010.00571}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
4,672
[ [ -0.037384033203125, -0.06219482421875, 0.022613525390625, 0.01264190673828125, -0.03546142578125, -0.0151519775390625, -0.00862884521484375, -0.034393310546875, 0.0276336669921875, 0.04461669921875, -0.04150390625, -0.028106689453125, -0.049102783203125, 0.0...
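The card above describes the `[CLS] Sentence [SEP] Flattened table [SEP]` input format but gives no snippet; a hedged sketch of feature extraction with the transformers TAPAS classes (the table and question are placeholders; all cell values must be strings, and some transformers versions additionally require the torch-scatter package):

```python
# Sketch: hidden states for a table-question pair with TAPAS.
import pandas as pd
from transformers import TapasTokenizer, TapasModel

tokenizer = TapasTokenizer.from_pretrained("google/tapas-base")
model = TapasModel.from_pretrained("google/tapas-base")

# Placeholder table; TAPAS expects every cell as a string.
table = pd.DataFrame({"City": ["Paris", "Berlin"], "Population": ["2161000", "3645000"]})
inputs = tokenizer(table=table, queries=["Which city has more people?"], return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, sequence length, hidden size)
```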
michiyasunaga/BioLinkBERT-large
2022-03-31T00:54:57.000Z
[ "transformers", "pytorch", "bert", "feature-extraction", "exbert", "linkbert", "biolinkbert", "fill-mask", "question-answering", "text-classification", "token-classification", "en", "dataset:pubmed", "arxiv:2203.15827", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
michiyasunaga
null
null
michiyasunaga/BioLinkBERT-large
23
2,029
transformers
2022-03-08T06:20:38
--- license: apache-2.0 language: en datasets: - pubmed tags: - bert - exbert - linkbert - biolinkbert - feature-extraction - fill-mask - question-answering - text-classification - token-classification widget: - text: "Sunitinib is a tyrosine kinase inhibitor" --- ## BioLinkBERT-large BioLinkBERT-large model pretrained on [PubMed](https://pubmed.ncbi.nlm.nih.gov/) abstracts along with citation link information. It is introduced in the paper [LinkBERT: Pretraining Language Models with Document Links (ACL 2022)](https://arxiv.org/abs/2203.15827). The code and data are available in [this repository](https://github.com/michiyasunaga/LinkBERT). This model achieves state-of-the-art performance on several biomedical NLP benchmarks such as [BLURB](https://microsoft.github.io/BLURB/) and [MedQA-USMLE](https://github.com/jind11/MedQA). ## Model description LinkBERT is a transformer encoder (BERT-like) model pretrained on a large corpus of documents. It is an improvement of BERT that newly captures **document links** such as hyperlinks and citation links to include knowledge that spans across multiple documents. Specifically, it was pretrained by feeding linked documents into the same language model context, besides a single document. LinkBERT can be used as a drop-in replacement for BERT. It achieves better performance for general language understanding tasks (e.g. text classification), and is also particularly effective for **knowledge-intensive** tasks (e.g. question answering) and **cross-document** tasks (e.g. reading comprehension, document retrieval). ## Intended uses & limitations The model can be used by fine-tuning on a downstream task, such as question answering, sequence classification, and token classification. You can also use the raw model for feature extraction (i.e. obtaining embeddings for input text). ### How to use To use the model to get the features of a given text in PyTorch: ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('michiyasunaga/BioLinkBERT-large') model = AutoModel.from_pretrained('michiyasunaga/BioLinkBERT-large') inputs = tokenizer("Sunitinib is a tyrosine kinase inhibitor", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` For fine-tuning, you can use [this repository](https://github.com/michiyasunaga/LinkBERT) or follow any other BERT fine-tuning codebases. ## Evaluation results When fine-tuned on downstream tasks, LinkBERT achieves the following results. **Biomedical benchmarks ([BLURB](https://microsoft.github.io/BLURB/), [MedQA](https://github.com/jind11/MedQA), [MMLU](https://github.com/hendrycks/test), etc.):** BioLinkBERT attains new state-of-the-art. | | BLURB score | PubMedQA | BioASQ | MedQA-USMLE | | ---------------------- | -------- | -------- | ------- | -------- | | PubmedBERT-base | 81.10 | 55.8 | 87.5 | 38.1 | | **BioLinkBERT-base** | **83.39** | **70.2** | **91.4** | **40.0** | | **BioLinkBERT-large** | **84.30** | **72.2** | **94.8** | **44.6** | | | MMLU-professional medicine | | ---------------------- | -------- | | GPT-3 (175B params) | 38.7 | | UnifiedQA (11B params) | 43.2 | | **BioLinkBERT-large (340M params)** | **50.7** | ## Citation If you find LinkBERT useful in your project, please cite the following: ```bibtex @InProceedings{yasunaga2022linkbert, author = {Michihiro Yasunaga and Jure Leskovec and Percy Liang}, title = {LinkBERT: Pretraining Language Models with Document Links}, year = {2022}, booktitle = {Association for Computational Linguistics (ACL)}, } ```
3,827
[ [ -0.01910400390625, -0.036651611328125, 0.028106689453125, -0.004360198974609375, -0.00408935546875, -0.0068817138671875, 0.0022525787353515625, -0.044677734375, 0.0277252197265625, 0.00006920099258422852, -0.0215301513671875, -0.048370361328125, -0.0538940429687...
Helsinki-NLP/opus-mt-tn-en
2023-08-16T12:07:02.000Z
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "tn", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-mt-tn-en
0
2,025
transformers
2022-03-02T23:29:04
--- tags: - translation license: apache-2.0 --- ### opus-mt-tn-en * source languages: tn * target languages: en * OPUS readme: [tn-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/tn-en/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-21.zip](https://object.pouta.csc.fi/OPUS-MT-models/tn-en/opus-2020-01-21.zip) * test set translations: [opus-2020-01-21.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/tn-en/opus-2020-01-21.test.txt) * test set scores: [opus-2020-01-21.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/tn-en/opus-2020-01-21.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.tn.en | 43.4 | 0.589 |
816
[ [ -0.02105712890625, -0.029693603515625, 0.02056884765625, 0.0258331298828125, -0.034149169921875, -0.0298919677734375, -0.0355224609375, -0.0021190643310546875, 0.004276275634765625, 0.038818359375, -0.049774169921875, -0.04168701171875, -0.045684814453125, 0...
komfysach/groow-tokens-6
2023-11-01T08:51:04.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
komfysach
null
null
komfysach/groow-tokens-6
0
2,025
diffusers
2023-11-01T08:47:26
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### groow_tokens_6 Dreambooth model trained by komfysach with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
505
[ [ -0.0244140625, -0.063720703125, 0.045684814453125, 0.0216217041015625, -0.031524658203125, 0.0289459228515625, 0.029541015625, -0.0177154541015625, 0.04803466796875, 0.0155792236328125, -0.016876220703125, -0.0245361328125, -0.048187255859375, -0.01783752441...
spacy/en_core_web_sm
2023-10-10T06:49:49.000Z
[ "spacy", "token-classification", "en", "license:mit", "model-index", "has_space", "region:us" ]
token-classification
spacy
null
null
spacy/en_core_web_sm
26
2,022
spacy
2022-03-02T23:29:05
--- tags: - spacy - token-classification language: - en license: mit model-index: - name: en_core_web_sm results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8454836771 - name: NER Recall type: recall value: 0.8456530449 - name: NER F Score type: f_score value: 0.8455683525 - task: name: TAG type: token-classification metrics: - name: TAG (XPOS) Accuracy type: accuracy value: 0.97246532 - task: name: UNLABELED_DEPENDENCIES type: token-classification metrics: - name: Unlabeled Attachment Score (UAS) type: f_score value: 0.9175304332 - task: name: LABELED_DEPENDENCIES type: token-classification metrics: - name: Labeled Attachment Score (LAS) type: f_score value: 0.89874821 - task: name: SENTS type: token-classification metrics: - name: Sentences F-Score type: f_score value: 0.9059485531 --- ### Details: https://spacy.io/models/en#en_core_web_sm English pipeline optimized for CPU. Components: tok2vec, tagger, parser, senter, ner, attribute_ruler, lemmatizer. | Feature | Description | | --- | --- | | **Name** | `en_core_web_sm` | | **Version** | `3.7.0` | | **spaCy** | `>=3.7.0,<3.8.0` | | **Default Pipeline** | `tok2vec`, `tagger`, `parser`, `attribute_ruler`, `lemmatizer`, `ner` | | **Components** | `tok2vec`, `tagger`, `parser`, `senter`, `attribute_ruler`, `lemmatizer`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | [OntoNotes 5](https://catalog.ldc.upenn.edu/LDC2013T19) (Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Robert Belvin, Ann Houston)<br />[ClearNLP Constituent-to-Dependency Conversion](https://github.com/clir/clearnlp-guidelines/blob/master/md/components/dependency_conversion.md) (Emory University)<br />[WordNet 3.0](https://wordnet.princeton.edu/) (Princeton University) | | **License** | `MIT` | | **Author** | [Explosion](https://explosion.ai) | ### Label Scheme <details> <summary>View label scheme (113 labels for 3 components)</summary> | Component | Labels | | --- | --- | | **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, `_SP`, ```` | | **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` | | **`ner`** | `CARDINAL`, `DATE`, `EVENT`, `FAC`, `GPE`, `LANGUAGE`, `LAW`, `LOC`, `MONEY`, `NORP`, `ORDINAL`, `ORG`, `PERCENT`, `PERSON`, `PRODUCT`, `QUANTITY`, `TIME`, `WORK_OF_ART` | </details> ### Accuracy | Type | Score | | --- | --- | | `TOKEN_ACC` | 99.86 | | `TOKEN_P` | 99.57 | | `TOKEN_R` | 99.58 | | `TOKEN_F` | 99.57 | | `TAG_ACC` | 97.25 | | `SENTS_P` | 92.02 | | `SENTS_R` | 89.21 | | `SENTS_F` | 90.59 | | `DEP_UAS` | 91.75 | | `DEP_LAS` | 89.87 | | `ENTS_P` | 84.55 | | `ENTS_R` | 84.57 | | `ENTS_F` | 84.56 |
3,648
[ [ -0.041839599609375, -0.031768798828125, 0.019805908203125, 0.01505279541015625, -0.01568603515625, -0.002178192138671875, -0.00946807861328125, -0.0128173828125, 0.0180816650390625, 0.03204345703125, -0.04937744140625, -0.06109619140625, -0.05169677734375, 0...
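The card above lists the pipeline's components and accuracy but no usage snippet; a minimal sketch (assuming the package was installed first, e.g. with `python -m spacy download en_core_web_sm`; the sentence is illustrative):

```python
# Sketch: running the tagger, parser, and ner components described above.
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Apple is looking at buying a U.K. startup for $1 billion.")

for token in doc[:5]:
    print(token.text, token.tag_, token.dep_)  # tagger and parser output
for ent in doc.ents:
    print(ent.text, ent.label_)  # ner output
```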
monologg/koelectra-base-discriminator
2021-10-20T16:55:57.000Z
[ "transformers", "pytorch", "electra", "pretraining", "korean", "ko", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
monologg
null
null
monologg/koelectra-base-discriminator
1
2,020
transformers
2022-03-02T23:29:05
--- language: ko license: apache-2.0 tags: - korean --- # KoELECTRA (Base Discriminator) Pretrained ELECTRA Language Model for Korean (`koelectra-base-discriminator`) For more detail, please see [original repository](https://github.com/monologg/KoELECTRA/blob/master/README_EN.md). ## Usage ### Load model and tokenizer ```python >>> from transformers import ElectraModel, ElectraTokenizer >>> model = ElectraModel.from_pretrained("monologg/koelectra-base-discriminator") >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator") ``` ### Tokenizer example ```python >>> from transformers import ElectraTokenizer >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator") >>> tokenizer.tokenize("[CLS] 한국어 ELECTRA를 공유합니다. [SEP]") ['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]'] >>> tokenizer.convert_tokens_to_ids(['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]']) [2, 18429, 41, 6240, 15229, 6204, 20894, 5689, 12622, 10690, 18, 3] ``` ## Example using ElectraForPreTraining ```python import torch from transformers import ElectraForPreTraining, ElectraTokenizer discriminator = ElectraForPreTraining.from_pretrained("monologg/koelectra-base-discriminator") tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator") sentence = "나는 방금 밥을 먹었다." fake_sentence = "나는 내일 밥을 먹었다." fake_tokens = tokenizer.tokenize(fake_sentence) fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt") discriminator_outputs = discriminator(fake_inputs) predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2) print(list(zip(fake_tokens, predictions.tolist()[1:-1]))) ```
1,764
[ [ -0.017364501953125, -0.0251007080078125, 0.0029964447021484375, 0.02459716796875, -0.047210693359375, 0.0200042724609375, -0.00634002685546875, 0.0102081298828125, 0.02203369140625, 0.0372314453125, -0.0312347412109375, -0.04345703125, -0.041961669921875, 0....
nielsr/layoutlmv2-finetuned-funsd
2023-09-11T12:30:41.000Z
[ "transformers", "pytorch", "tensorboard", "safetensors", "layoutlmv2", "token-classification", "generated_from_trainer", "dataset:funsd", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
nielsr
null
null
nielsr/layoutlmv2-finetuned-funsd
10
2,019
transformers
2022-03-02T23:29:05
--- tags: - generated_from_trainer datasets: - funsd model_index: - name: layoutlmv2-finetuned-funsd results: - task: name: Token Classification type: token-classification dataset: name: funsd type: funsd args: funsd base_model: microsoft/layoutlmv2-base-uncased --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # layoutlmv2-finetuned-funsd This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on the funsd dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.9.0.dev0 - Pytorch 1.8.0+cu101 - Datasets 1.9.0 - Tokenizers 0.10.3
1,311
[ [ -0.0233612060546875, -0.034515380859375, 0.005706787109375, 0.0235748291015625, -0.035736083984375, -0.029937744140625, 0.0046539306640625, -0.0006985664367675781, 0.00890350341796875, 0.03717041015625, -0.055389404296875, -0.0384521484375, -0.0280609130859375, ...
entropy/roberta_zinc_480m
2023-09-18T23:16:49.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "chemistry", "molecule", "drug", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
entropy
null
null
entropy/roberta_zinc_480m
2
2,019
transformers
2023-02-24T04:22:29
--- license: mit tags: - chemistry - molecule - drug --- # Roberta Zinc 480m This is a RoBERTa-style masked language model trained on ~480m SMILES strings from the [ZINC database](https://zinc.docking.org/). The model has ~102m parameters and was trained for 150000 iterations with a batch size of 4096 to a validation loss of ~0.122. This model is useful for generating embeddings from SMILES strings. ```python from transformers import RobertaTokenizerFast, RobertaForMaskedLM, DataCollatorWithPadding tokenizer = RobertaTokenizerFast.from_pretrained("entropy/roberta_zinc_480m", max_len=128) model = RobertaForMaskedLM.from_pretrained('entropy/roberta_zinc_480m') collator = DataCollatorWithPadding(tokenizer, padding=True, return_tensors='pt') smiles = ['Brc1cc2c(NCc3ccccc3)ncnc2s1', 'Brc1cc2c(NCc3ccccn3)ncnc2s1', 'Brc1cc2c(NCc3cccs3)ncnc2s1', 'Brc1cc2c(NCc3ccncc3)ncnc2s1', 'Brc1cc2c(Nc3ccccc3)ncnc2s1'] inputs = collator(tokenizer(smiles)) outputs = model(**inputs, output_hidden_states=True) full_embeddings = outputs[1][-1] mask = inputs['attention_mask'] embeddings = ((full_embeddings * mask.unsqueeze(-1)).sum(1) / mask.sum(-1).unsqueeze(-1)) ``` ## Decoder There is also a [decoder model](https://huggingface.co/entropy/roberta_zinc_decoder) trained to reconstruct inputs from embeddings.
1,325
[ [ -0.016082763671875, -0.040191650390625, 0.029296875, 0.0208282470703125, -0.0216217041015625, -0.0069427490234375, -0.030120849609375, -0.006549835205078125, 0.0130157470703125, 0.028411865234375, -0.056640625, -0.053985595703125, -0.05133056640625, 0.007469...
heegyu/koalpaca-355m
2023-03-28T05:11:50.000Z
[ "transformers", "pytorch", "gpt2", "text-generation", "ko", "dataset:Bingsu/ko_alpaca_data", "license:apache-2.0", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
heegyu
null
null
heegyu/koalpaca-355m
3
2,019
transformers
2023-03-28T01:28:44
--- license: apache-2.0 widget: - text: <usr>알버트 아인슈타인에 대해서 알아? <sys> - text: <usr>다음을 동물, 식물, 광물로 분류하십시오. 참나무, 구리 광석, 코끼리 <sys> datasets: - Bingsu/ko_alpaca_data language: - ko --- - A model obtained by fine-tuning [Ajoublue-GPT2-medium](https://huggingface.co/heegyu/ajoublue-gpt2-medium) on the [koalpaca](https://huggingface.co/datasets/Bingsu/ko_alpaca_data) dataset - You can try it out in this [Huggingface Space](https://huggingface.co/spaces/heegyu/koalpaca-355m). - Trained for 5 epochs with learning rate = 5e-5 ```python from transformers import pipeline print('start model loading') model_name = 'heegyu/koalpaca-355m' generator = pipeline( 'text-generation', model=model_name, tokenizer=model_name ) def query(instruction, input=None): if input: prompt = f"<usr>{instruction}\n{input}\n<sys>" else: prompt = f"<usr>{instruction}\n<sys>" print(generator( prompt, do_sample=True, top_p=0.9, early_stopping=True, max_length=256, )[0]['generated_text']) query("슈카월드에 대해서 알아?") query("세상에서 가장 유명한 사람은?") query("알버트 아인슈타인에 대해서 설명해줘") query("다음 영화에 대해서 설명해줘", "기생충") ``` Result ``` <usr>슈카월드에 대해서 알아? <sys>슈카월드는 미국 캘리포니아 주의 한 도시에서 열린 어린이 놀이박람회입니다. 이 박람회는 세계에서 가장 큰 어린이 놀이박람회로 유명합니다. <usr>세상에서 가장 유명한 사람은? <sys>가장 유명한 사람은 니콜라 테슬라입니다. <usr>알버트 아인슈타인에 대해서 설명해줘 <sys>알버트 아인슈타인은 1856년, 물리학 분야에서 최초로 노벨상을 수상한 물리학자입니다. 그는 상대성 이론을 비롯한 다수의 저서를 발표하며 현대 물리학의 기초를 확립하였습니다. <usr>다음 영화에 대해서 설명해줘 기생충 <sys>"기생충"은 가족과 학교에서 도망친 사람들 사이에서 벌어지는 일을 그린 영화로, 현실적이면서도 감각적인 연출과 다양한 캐릭터들의 매력을 살리는 방식으로 관객들을 사로잡습니다. <usr>섭씨 온도를 화씨로 변경해줘 섭씨 온도: 15도 <sys>"섭씨 온도를 화씨로 변경해주세요." -> "화씨 온도가 33도입니다." ```
1,682
[ [ -0.03582763671875, -0.054718017578125, 0.039276123046875, 0.0304718017578125, -0.0283660888671875, -0.0202789306640625, 0.01096343994140625, -0.019256591796875, 0.015838623046875, 0.0240478515625, -0.0428466796875, -0.032958984375, -0.04766845703125, 0.00185...
Helsinki-NLP/opus-mt-en-mul
2023-08-16T11:30:35.000Z
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "en", "ca", "es", "os", "eo", "ro", "fy", "cy", "is", "lb", "su", "an", "sq", "fr", "ht", "rm", "cv", "ig", "am", "eu", "tr", "ps", "af", "ny", "ch", "uk", "sl", "lt", ...
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-mt-en-mul
9
2,016
transformers
2022-03-02T23:29:04
--- language: - en - ca - es - os - eo - ro - fy - cy - is - lb - su - an - sq - fr - ht - rm - cv - ig - am - eu - tr - ps - af - ny - ch - uk - sl - lt - tk - sg - ar - lg - bg - be - ka - gd - ja - si - br - mh - km - th - ty - rw - te - mk - or - wo - kl - mr - ru - yo - hu - fo - zh - ti - co - ee - oc - sn - mt - ts - pl - gl - nb - bn - tt - bo - lo - id - gn - nv - hy - kn - to - io - so - vi - da - fj - gv - sm - nl - mi - pt - hi - se - as - ta - et - kw - ga - sv - ln - na - mn - gu - wa - lv - jv - el - my - ba - it - hr - ur - ce - nn - fi - mg - rn - xh - ab - de - cs - he - zu - yi - ml - mul tags: - translation license: apache-2.0 --- ### eng-mul * source group: English * target group: Multiple languages * OPUS readme: [eng-mul](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-mul/README.md) * model: transformer * source language(s): eng * target language(s): abk acm ady afb afh_Latn afr akl_Latn aln amh ang_Latn apc ara arg arq ary arz asm ast avk_Latn awa aze_Latn bak bam_Latn bel bel_Latn ben bho bod bos_Latn bre brx brx_Latn bul bul_Latn cat ceb ces cha che chr chv cjy_Hans cjy_Hant cmn cmn_Hans cmn_Hant cor cos crh crh_Latn csb_Latn cym dan deu dsb dtp dws_Latn egl ell enm_Latn epo est eus ewe ext fao fij fin fkv_Latn fra frm_Latn frr fry fuc fuv gan gcf_Latn gil gla gle glg glv gom gos got_Goth grc_Grek grn gsw guj hat hau_Latn haw heb hif_Latn hil hin hnj_Latn hoc hoc_Latn hrv hsb hun hye iba ibo ido ido_Latn ike_Latn ile_Latn ilo ina_Latn ind isl ita izh jav jav_Java jbo jbo_Cyrl jbo_Latn jdt_Cyrl jpn kab kal kan kat kaz_Cyrl kaz_Latn kek_Latn kha khm khm_Latn kin kir_Cyrl kjh kpv krl ksh kum kur_Arab kur_Latn lad lad_Latn lao lat_Latn lav ldn_Latn lfn_Cyrl lfn_Latn lij lin lit liv_Latn lkt lld_Latn lmo ltg ltz lug lzh lzh_Hans mad mah mai mal mar max_Latn mdf mfe mhr mic min mkd mlg mlt mnw moh mon mri mwl mww mya myv nan nau nav nds niu nld nno nob nob_Hebr nog non_Latn nov_Latn npi nya oci ori orv_Cyrl oss ota_Arab ota_Latn pag pan_Guru pap pau pdc pes pes_Latn pes_Thaa pms pnb pol por ppl_Latn prg_Latn pus quc qya qya_Latn rap rif_Latn roh rom ron rue run rus sag sah san_Deva scn sco sgs shs_Latn shy_Latn sin sjn_Latn slv sma sme smo sna snd_Arab som spa sqi srp_Cyrl srp_Latn stq sun swe swg swh tah tam tat tat_Arab tat_Latn tel tet tgk_Cyrl tha tir tlh_Latn tly_Latn tmw_Latn toi_Latn ton tpw_Latn tso tuk tuk_Latn tur tvl tyv tzl tzl_Latn udm uig_Arab uig_Cyrl ukr umb urd uzb_Cyrl uzb_Latn vec vie vie_Hani vol_Latn vro war wln wol wuu xal xho yid yor yue yue_Hans yue_Hant zho zho_Hans zho_Hant zlm_Latn zsm_Latn zul zza * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID) * download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-mul/opus2m-2020-08-01.zip) * test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-mul/opus2m-2020-08-01.test.txt) * test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-mul/opus2m-2020-08-01.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | newsdev2014-enghin.eng.hin | 5.0 | 0.288 | | newsdev2015-enfi-engfin.eng.fin | 9.3 | 0.418 | | newsdev2016-enro-engron.eng.ron | 17.2 | 0.488 | | newsdev2016-entr-engtur.eng.tur | 8.2 | 0.402 | | newsdev2017-enlv-englav.eng.lav | 12.9 | 0.444 | | 
newsdev2017-enzh-engzho.eng.zho | 17.6 | 0.170 | | newsdev2018-enet-engest.eng.est | 10.9 | 0.423 | | newsdev2019-engu-engguj.eng.guj | 5.2 | 0.284 | | newsdev2019-enlt-englit.eng.lit | 11.0 | 0.431 | | newsdiscussdev2015-enfr-engfra.eng.fra | 22.6 | 0.521 | | newsdiscusstest2015-enfr-engfra.eng.fra | 25.9 | 0.546 | | newssyscomb2009-engces.eng.ces | 10.3 | 0.394 | | newssyscomb2009-engdeu.eng.deu | 13.3 | 0.459 | | newssyscomb2009-engfra.eng.fra | 21.5 | 0.522 | | newssyscomb2009-enghun.eng.hun | 8.1 | 0.371 | | newssyscomb2009-engita.eng.ita | 22.1 | 0.540 | | newssyscomb2009-engspa.eng.spa | 23.8 | 0.531 | | news-test2008-engces.eng.ces | 9.0 | 0.376 | | news-test2008-engdeu.eng.deu | 14.2 | 0.451 | | news-test2008-engfra.eng.fra | 19.8 | 0.500 | | news-test2008-engspa.eng.spa | 22.8 | 0.518 | | newstest2009-engces.eng.ces | 9.8 | 0.392 | | newstest2009-engdeu.eng.deu | 13.7 | 0.454 | | newstest2009-engfra.eng.fra | 20.7 | 0.514 | | newstest2009-enghun.eng.hun | 8.4 | 0.370 | | newstest2009-engita.eng.ita | 22.4 | 0.538 | | newstest2009-engspa.eng.spa | 23.5 | 0.532 | | newstest2010-engces.eng.ces | 10.0 | 0.393 | | newstest2010-engdeu.eng.deu | 15.2 | 0.463 | | newstest2010-engfra.eng.fra | 22.0 | 0.524 | | newstest2010-engspa.eng.spa | 27.2 | 0.556 | | newstest2011-engces.eng.ces | 10.8 | 0.392 | | newstest2011-engdeu.eng.deu | 14.2 | 0.449 | | newstest2011-engfra.eng.fra | 24.3 | 0.544 | | newstest2011-engspa.eng.spa | 28.3 | 0.559 | | newstest2012-engces.eng.ces | 9.9 | 0.377 | | newstest2012-engdeu.eng.deu | 14.3 | 0.449 | | newstest2012-engfra.eng.fra | 23.2 | 0.530 | | newstest2012-engrus.eng.rus | 16.0 | 0.463 | | newstest2012-engspa.eng.spa | 27.8 | 0.555 | | newstest2013-engces.eng.ces | 11.0 | 0.392 | | newstest2013-engdeu.eng.deu | 16.4 | 0.469 | | newstest2013-engfra.eng.fra | 22.6 | 0.515 | | newstest2013-engrus.eng.rus | 12.1 | 0.414 | | newstest2013-engspa.eng.spa | 24.9 | 0.532 | | newstest2014-hien-enghin.eng.hin | 7.2 | 0.311 | | newstest2015-encs-engces.eng.ces | 10.9 | 0.396 | | newstest2015-ende-engdeu.eng.deu | 18.3 | 0.490 | | newstest2015-enfi-engfin.eng.fin | 10.1 | 0.421 | | newstest2015-enru-engrus.eng.rus | 14.5 | 0.445 | | newstest2016-encs-engces.eng.ces | 12.2 | 0.408 | | newstest2016-ende-engdeu.eng.deu | 21.4 | 0.517 | | newstest2016-enfi-engfin.eng.fin | 11.2 | 0.435 | | newstest2016-enro-engron.eng.ron | 16.6 | 0.472 | | newstest2016-enru-engrus.eng.rus | 13.4 | 0.435 | | newstest2016-entr-engtur.eng.tur | 8.1 | 0.385 | | newstest2017-encs-engces.eng.ces | 9.6 | 0.377 | | newstest2017-ende-engdeu.eng.deu | 17.9 | 0.482 | | newstest2017-enfi-engfin.eng.fin | 11.8 | 0.440 | | newstest2017-enlv-englav.eng.lav | 9.6 | 0.412 | | newstest2017-enru-engrus.eng.rus | 14.1 | 0.446 | | newstest2017-entr-engtur.eng.tur | 8.0 | 0.378 | | newstest2017-enzh-engzho.eng.zho | 16.8 | 0.175 | | newstest2018-encs-engces.eng.ces | 9.8 | 0.380 | | newstest2018-ende-engdeu.eng.deu | 23.8 | 0.536 | | newstest2018-enet-engest.eng.est | 11.8 | 0.433 | | newstest2018-enfi-engfin.eng.fin | 7.8 | 0.398 | | newstest2018-enru-engrus.eng.rus | 12.2 | 0.434 | | newstest2018-entr-engtur.eng.tur | 7.5 | 0.383 | | newstest2018-enzh-engzho.eng.zho | 18.3 | 0.179 | | newstest2019-encs-engces.eng.ces | 10.7 | 0.389 | | newstest2019-ende-engdeu.eng.deu | 21.0 | 0.512 | | newstest2019-enfi-engfin.eng.fin | 10.4 | 0.420 | | newstest2019-engu-engguj.eng.guj | 5.8 | 0.297 | | newstest2019-enlt-englit.eng.lit | 8.0 | 0.388 | | newstest2019-enru-engrus.eng.rus | 13.0 | 0.415 | | 
newstest2019-enzh-engzho.eng.zho | 15.0 | 0.192 | | newstestB2016-enfi-engfin.eng.fin | 9.0 | 0.414 | | newstestB2017-enfi-engfin.eng.fin | 9.5 | 0.415 | | Tatoeba-test.eng-abk.eng.abk | 4.2 | 0.275 | | Tatoeba-test.eng-ady.eng.ady | 0.4 | 0.006 | | Tatoeba-test.eng-afh.eng.afh | 1.0 | 0.058 | | Tatoeba-test.eng-afr.eng.afr | 47.0 | 0.663 | | Tatoeba-test.eng-akl.eng.akl | 2.7 | 0.080 | | Tatoeba-test.eng-amh.eng.amh | 8.5 | 0.455 | | Tatoeba-test.eng-ang.eng.ang | 6.2 | 0.138 | | Tatoeba-test.eng-ara.eng.ara | 6.3 | 0.325 | | Tatoeba-test.eng-arg.eng.arg | 1.5 | 0.107 | | Tatoeba-test.eng-asm.eng.asm | 2.1 | 0.265 | | Tatoeba-test.eng-ast.eng.ast | 15.7 | 0.393 | | Tatoeba-test.eng-avk.eng.avk | 0.2 | 0.095 | | Tatoeba-test.eng-awa.eng.awa | 0.1 | 0.002 | | Tatoeba-test.eng-aze.eng.aze | 19.0 | 0.500 | | Tatoeba-test.eng-bak.eng.bak | 12.7 | 0.379 | | Tatoeba-test.eng-bam.eng.bam | 8.3 | 0.037 | | Tatoeba-test.eng-bel.eng.bel | 13.5 | 0.396 | | Tatoeba-test.eng-ben.eng.ben | 10.0 | 0.383 | | Tatoeba-test.eng-bho.eng.bho | 0.1 | 0.003 | | Tatoeba-test.eng-bod.eng.bod | 0.0 | 0.147 | | Tatoeba-test.eng-bre.eng.bre | 7.6 | 0.275 | | Tatoeba-test.eng-brx.eng.brx | 0.8 | 0.060 | | Tatoeba-test.eng-bul.eng.bul | 32.1 | 0.542 | | Tatoeba-test.eng-cat.eng.cat | 37.0 | 0.595 | | Tatoeba-test.eng-ceb.eng.ceb | 9.6 | 0.409 | | Tatoeba-test.eng-ces.eng.ces | 24.0 | 0.475 | | Tatoeba-test.eng-cha.eng.cha | 3.9 | 0.228 | | Tatoeba-test.eng-che.eng.che | 0.7 | 0.013 | | Tatoeba-test.eng-chm.eng.chm | 2.6 | 0.212 | | Tatoeba-test.eng-chr.eng.chr | 6.0 | 0.190 | | Tatoeba-test.eng-chv.eng.chv | 6.5 | 0.369 | | Tatoeba-test.eng-cor.eng.cor | 0.9 | 0.086 | | Tatoeba-test.eng-cos.eng.cos | 4.2 | 0.174 | | Tatoeba-test.eng-crh.eng.crh | 9.9 | 0.361 | | Tatoeba-test.eng-csb.eng.csb | 3.4 | 0.230 | | Tatoeba-test.eng-cym.eng.cym | 18.0 | 0.418 | | Tatoeba-test.eng-dan.eng.dan | 42.5 | 0.624 | | Tatoeba-test.eng-deu.eng.deu | 25.2 | 0.505 | | Tatoeba-test.eng-dsb.eng.dsb | 0.9 | 0.121 | | Tatoeba-test.eng-dtp.eng.dtp | 0.3 | 0.084 | | Tatoeba-test.eng-dws.eng.dws | 0.2 | 0.040 | | Tatoeba-test.eng-egl.eng.egl | 0.4 | 0.085 | | Tatoeba-test.eng-ell.eng.ell | 28.7 | 0.543 | | Tatoeba-test.eng-enm.eng.enm | 3.3 | 0.295 | | Tatoeba-test.eng-epo.eng.epo | 33.4 | 0.570 | | Tatoeba-test.eng-est.eng.est | 30.3 | 0.545 | | Tatoeba-test.eng-eus.eng.eus | 18.5 | 0.486 | | Tatoeba-test.eng-ewe.eng.ewe | 6.8 | 0.272 | | Tatoeba-test.eng-ext.eng.ext | 5.0 | 0.228 | | Tatoeba-test.eng-fao.eng.fao | 5.2 | 0.277 | | Tatoeba-test.eng-fas.eng.fas | 6.9 | 0.265 | | Tatoeba-test.eng-fij.eng.fij | 31.5 | 0.365 | | Tatoeba-test.eng-fin.eng.fin | 18.5 | 0.459 | | Tatoeba-test.eng-fkv.eng.fkv | 0.9 | 0.132 | | Tatoeba-test.eng-fra.eng.fra | 31.5 | 0.546 | | Tatoeba-test.eng-frm.eng.frm | 0.9 | 0.128 | | Tatoeba-test.eng-frr.eng.frr | 3.0 | 0.025 | | Tatoeba-test.eng-fry.eng.fry | 14.4 | 0.387 | | Tatoeba-test.eng-ful.eng.ful | 0.4 | 0.061 | | Tatoeba-test.eng-gcf.eng.gcf | 0.3 | 0.075 | | Tatoeba-test.eng-gil.eng.gil | 47.4 | 0.706 | | Tatoeba-test.eng-gla.eng.gla | 10.9 | 0.341 | | Tatoeba-test.eng-gle.eng.gle | 26.8 | 0.493 | | Tatoeba-test.eng-glg.eng.glg | 32.5 | 0.565 | | Tatoeba-test.eng-glv.eng.glv | 21.5 | 0.395 | | Tatoeba-test.eng-gos.eng.gos | 0.3 | 0.124 | | Tatoeba-test.eng-got.eng.got | 0.2 | 0.010 | | Tatoeba-test.eng-grc.eng.grc | 0.0 | 0.005 | | Tatoeba-test.eng-grn.eng.grn | 1.5 | 0.129 | | Tatoeba-test.eng-gsw.eng.gsw | 0.6 | 0.106 | | Tatoeba-test.eng-guj.eng.guj | 15.4 | 0.347 | | Tatoeba-test.eng-hat.eng.hat | 31.1 
| 0.527 | | Tatoeba-test.eng-hau.eng.hau | 6.5 | 0.385 | | Tatoeba-test.eng-haw.eng.haw | 0.2 | 0.066 | | Tatoeba-test.eng-hbs.eng.hbs | 28.7 | 0.531 | | Tatoeba-test.eng-heb.eng.heb | 21.3 | 0.443 | | Tatoeba-test.eng-hif.eng.hif | 2.8 | 0.268 | | Tatoeba-test.eng-hil.eng.hil | 12.0 | 0.463 | | Tatoeba-test.eng-hin.eng.hin | 13.0 | 0.401 | | Tatoeba-test.eng-hmn.eng.hmn | 0.2 | 0.073 | | Tatoeba-test.eng-hoc.eng.hoc | 0.2 | 0.077 | | Tatoeba-test.eng-hsb.eng.hsb | 5.7 | 0.308 | | Tatoeba-test.eng-hun.eng.hun | 17.1 | 0.431 | | Tatoeba-test.eng-hye.eng.hye | 15.0 | 0.378 | | Tatoeba-test.eng-iba.eng.iba | 16.0 | 0.437 | | Tatoeba-test.eng-ibo.eng.ibo | 2.9 | 0.221 | | Tatoeba-test.eng-ido.eng.ido | 11.5 | 0.403 | | Tatoeba-test.eng-iku.eng.iku | 2.3 | 0.089 | | Tatoeba-test.eng-ile.eng.ile | 4.3 | 0.282 | | Tatoeba-test.eng-ilo.eng.ilo | 26.4 | 0.522 | | Tatoeba-test.eng-ina.eng.ina | 20.9 | 0.493 | | Tatoeba-test.eng-isl.eng.isl | 12.5 | 0.375 | | Tatoeba-test.eng-ita.eng.ita | 33.9 | 0.592 | | Tatoeba-test.eng-izh.eng.izh | 4.6 | 0.050 | | Tatoeba-test.eng-jav.eng.jav | 7.8 | 0.328 | | Tatoeba-test.eng-jbo.eng.jbo | 0.1 | 0.123 | | Tatoeba-test.eng-jdt.eng.jdt | 6.4 | 0.008 | | Tatoeba-test.eng-jpn.eng.jpn | 0.0 | 0.000 | | Tatoeba-test.eng-kab.eng.kab | 5.9 | 0.261 | | Tatoeba-test.eng-kal.eng.kal | 13.4 | 0.382 | | Tatoeba-test.eng-kan.eng.kan | 4.8 | 0.358 | | Tatoeba-test.eng-kat.eng.kat | 1.8 | 0.115 | | Tatoeba-test.eng-kaz.eng.kaz | 8.8 | 0.354 | | Tatoeba-test.eng-kek.eng.kek | 3.7 | 0.188 | | Tatoeba-test.eng-kha.eng.kha | 0.5 | 0.094 | | Tatoeba-test.eng-khm.eng.khm | 0.4 | 0.243 | | Tatoeba-test.eng-kin.eng.kin | 5.2 | 0.362 | | Tatoeba-test.eng-kir.eng.kir | 17.2 | 0.416 | | Tatoeba-test.eng-kjh.eng.kjh | 0.6 | 0.009 | | Tatoeba-test.eng-kok.eng.kok | 5.5 | 0.005 | | Tatoeba-test.eng-kom.eng.kom | 2.4 | 0.012 | | Tatoeba-test.eng-krl.eng.krl | 2.0 | 0.099 | | Tatoeba-test.eng-ksh.eng.ksh | 0.4 | 0.074 | | Tatoeba-test.eng-kum.eng.kum | 0.9 | 0.007 | | Tatoeba-test.eng-kur.eng.kur | 9.1 | 0.174 | | Tatoeba-test.eng-lad.eng.lad | 1.2 | 0.154 | | Tatoeba-test.eng-lah.eng.lah | 0.1 | 0.001 | | Tatoeba-test.eng-lao.eng.lao | 0.6 | 0.426 | | Tatoeba-test.eng-lat.eng.lat | 8.2 | 0.366 | | Tatoeba-test.eng-lav.eng.lav | 20.4 | 0.475 | | Tatoeba-test.eng-ldn.eng.ldn | 0.3 | 0.059 | | Tatoeba-test.eng-lfn.eng.lfn | 0.5 | 0.104 | | Tatoeba-test.eng-lij.eng.lij | 0.2 | 0.094 | | Tatoeba-test.eng-lin.eng.lin | 1.2 | 0.276 | | Tatoeba-test.eng-lit.eng.lit | 17.4 | 0.488 | | Tatoeba-test.eng-liv.eng.liv | 0.3 | 0.039 | | Tatoeba-test.eng-lkt.eng.lkt | 0.3 | 0.041 | | Tatoeba-test.eng-lld.eng.lld | 0.1 | 0.083 | | Tatoeba-test.eng-lmo.eng.lmo | 1.4 | 0.154 | | Tatoeba-test.eng-ltz.eng.ltz | 19.1 | 0.395 | | Tatoeba-test.eng-lug.eng.lug | 4.2 | 0.382 | | Tatoeba-test.eng-mad.eng.mad | 2.1 | 0.075 | | Tatoeba-test.eng-mah.eng.mah | 9.5 | 0.331 | | Tatoeba-test.eng-mai.eng.mai | 9.3 | 0.372 | | Tatoeba-test.eng-mal.eng.mal | 8.3 | 0.437 | | Tatoeba-test.eng-mar.eng.mar | 13.5 | 0.410 | | Tatoeba-test.eng-mdf.eng.mdf | 2.3 | 0.008 | | Tatoeba-test.eng-mfe.eng.mfe | 83.6 | 0.905 | | Tatoeba-test.eng-mic.eng.mic | 7.6 | 0.214 | | Tatoeba-test.eng-mkd.eng.mkd | 31.8 | 0.540 | | Tatoeba-test.eng-mlg.eng.mlg | 31.3 | 0.464 | | Tatoeba-test.eng-mlt.eng.mlt | 11.7 | 0.427 | | Tatoeba-test.eng-mnw.eng.mnw | 0.1 | 0.000 | | Tatoeba-test.eng-moh.eng.moh | 0.6 | 0.067 | | Tatoeba-test.eng-mon.eng.mon | 8.5 | 0.323 | | Tatoeba-test.eng-mri.eng.mri | 8.5 | 0.320 | | Tatoeba-test.eng-msa.eng.msa | 24.5 | 
0.498 | | Tatoeba-test.eng.multi | 22.4 | 0.451 | | Tatoeba-test.eng-mwl.eng.mwl | 3.8 | 0.169 | | Tatoeba-test.eng-mya.eng.mya | 0.2 | 0.123 | | Tatoeba-test.eng-myv.eng.myv | 1.1 | 0.014 | | Tatoeba-test.eng-nau.eng.nau | 0.6 | 0.109 | | Tatoeba-test.eng-nav.eng.nav | 1.8 | 0.149 | | Tatoeba-test.eng-nds.eng.nds | 11.3 | 0.365 | | Tatoeba-test.eng-nep.eng.nep | 0.5 | 0.004 | | Tatoeba-test.eng-niu.eng.niu | 34.4 | 0.501 | | Tatoeba-test.eng-nld.eng.nld | 37.6 | 0.598 | | Tatoeba-test.eng-nog.eng.nog | 0.2 | 0.010 | | Tatoeba-test.eng-non.eng.non | 0.2 | 0.096 | | Tatoeba-test.eng-nor.eng.nor | 36.3 | 0.577 | | Tatoeba-test.eng-nov.eng.nov | 0.9 | 0.180 | | Tatoeba-test.eng-nya.eng.nya | 9.8 | 0.524 | | Tatoeba-test.eng-oci.eng.oci | 6.3 | 0.288 | | Tatoeba-test.eng-ori.eng.ori | 5.3 | 0.273 | | Tatoeba-test.eng-orv.eng.orv | 0.2 | 0.007 | | Tatoeba-test.eng-oss.eng.oss | 3.0 | 0.230 | | Tatoeba-test.eng-ota.eng.ota | 0.2 | 0.053 | | Tatoeba-test.eng-pag.eng.pag | 20.2 | 0.513 | | Tatoeba-test.eng-pan.eng.pan | 6.4 | 0.301 | | Tatoeba-test.eng-pap.eng.pap | 44.7 | 0.624 | | Tatoeba-test.eng-pau.eng.pau | 0.8 | 0.098 | | Tatoeba-test.eng-pdc.eng.pdc | 2.9 | 0.143 | | Tatoeba-test.eng-pms.eng.pms | 0.6 | 0.124 | | Tatoeba-test.eng-pol.eng.pol | 22.7 | 0.500 | | Tatoeba-test.eng-por.eng.por | 31.6 | 0.570 | | Tatoeba-test.eng-ppl.eng.ppl | 0.5 | 0.085 | | Tatoeba-test.eng-prg.eng.prg | 0.1 | 0.078 | | Tatoeba-test.eng-pus.eng.pus | 0.9 | 0.137 | | Tatoeba-test.eng-quc.eng.quc | 2.7 | 0.255 | | Tatoeba-test.eng-qya.eng.qya | 0.4 | 0.084 | | Tatoeba-test.eng-rap.eng.rap | 1.9 | 0.050 | | Tatoeba-test.eng-rif.eng.rif | 1.3 | 0.102 | | Tatoeba-test.eng-roh.eng.roh | 1.4 | 0.169 | | Tatoeba-test.eng-rom.eng.rom | 7.8 | 0.329 | | Tatoeba-test.eng-ron.eng.ron | 27.0 | 0.530 | | Tatoeba-test.eng-rue.eng.rue | 0.1 | 0.009 | | Tatoeba-test.eng-run.eng.run | 9.8 | 0.434 | | Tatoeba-test.eng-rus.eng.rus | 22.2 | 0.465 | | Tatoeba-test.eng-sag.eng.sag | 4.8 | 0.155 | | Tatoeba-test.eng-sah.eng.sah | 0.2 | 0.007 | | Tatoeba-test.eng-san.eng.san | 1.7 | 0.143 | | Tatoeba-test.eng-scn.eng.scn | 1.5 | 0.083 | | Tatoeba-test.eng-sco.eng.sco | 30.3 | 0.514 | | Tatoeba-test.eng-sgs.eng.sgs | 1.6 | 0.104 | | Tatoeba-test.eng-shs.eng.shs | 0.7 | 0.049 | | Tatoeba-test.eng-shy.eng.shy | 0.6 | 0.064 | | Tatoeba-test.eng-sin.eng.sin | 5.4 | 0.317 | | Tatoeba-test.eng-sjn.eng.sjn | 0.3 | 0.074 | | Tatoeba-test.eng-slv.eng.slv | 12.8 | 0.313 | | Tatoeba-test.eng-sma.eng.sma | 0.8 | 0.063 | | Tatoeba-test.eng-sme.eng.sme | 13.2 | 0.290 | | Tatoeba-test.eng-smo.eng.smo | 12.1 | 0.416 | | Tatoeba-test.eng-sna.eng.sna | 27.1 | 0.533 | | Tatoeba-test.eng-snd.eng.snd | 6.0 | 0.359 | | Tatoeba-test.eng-som.eng.som | 16.0 | 0.274 | | Tatoeba-test.eng-spa.eng.spa | 36.7 | 0.603 | | Tatoeba-test.eng-sqi.eng.sqi | 32.3 | 0.573 | | Tatoeba-test.eng-stq.eng.stq | 0.6 | 0.198 | | Tatoeba-test.eng-sun.eng.sun | 39.0 | 0.447 | | Tatoeba-test.eng-swa.eng.swa | 1.1 | 0.109 | | Tatoeba-test.eng-swe.eng.swe | 42.7 | 0.614 | | Tatoeba-test.eng-swg.eng.swg | 0.6 | 0.118 | | Tatoeba-test.eng-tah.eng.tah | 12.4 | 0.294 | | Tatoeba-test.eng-tam.eng.tam | 5.0 | 0.404 | | Tatoeba-test.eng-tat.eng.tat | 9.9 | 0.326 | | Tatoeba-test.eng-tel.eng.tel | 4.7 | 0.326 | | Tatoeba-test.eng-tet.eng.tet | 0.7 | 0.100 | | Tatoeba-test.eng-tgk.eng.tgk | 5.5 | 0.304 | | Tatoeba-test.eng-tha.eng.tha | 2.2 | 0.456 | | Tatoeba-test.eng-tir.eng.tir | 1.5 | 0.197 | | Tatoeba-test.eng-tlh.eng.tlh | 0.0 | 0.032 | | Tatoeba-test.eng-tly.eng.tly | 0.3 | 0.061 | | 
Tatoeba-test.eng-toi.eng.toi | 8.3 | 0.219 | | Tatoeba-test.eng-ton.eng.ton | 32.7 | 0.619 | | Tatoeba-test.eng-tpw.eng.tpw | 1.4 | 0.136 | | Tatoeba-test.eng-tso.eng.tso | 9.6 | 0.465 | | Tatoeba-test.eng-tuk.eng.tuk | 9.4 | 0.383 | | Tatoeba-test.eng-tur.eng.tur | 24.1 | 0.542 | | Tatoeba-test.eng-tvl.eng.tvl | 8.9 | 0.398 | | Tatoeba-test.eng-tyv.eng.tyv | 10.4 | 0.249 | | Tatoeba-test.eng-tzl.eng.tzl | 0.2 | 0.098 | | Tatoeba-test.eng-udm.eng.udm | 6.5 | 0.212 | | Tatoeba-test.eng-uig.eng.uig | 2.1 | 0.266 | | Tatoeba-test.eng-ukr.eng.ukr | 24.3 | 0.479 | | Tatoeba-test.eng-umb.eng.umb | 4.4 | 0.274 | | Tatoeba-test.eng-urd.eng.urd | 8.6 | 0.344 | | Tatoeba-test.eng-uzb.eng.uzb | 6.9 | 0.343 | | Tatoeba-test.eng-vec.eng.vec | 1.0 | 0.094 | | Tatoeba-test.eng-vie.eng.vie | 23.2 | 0.420 | | Tatoeba-test.eng-vol.eng.vol | 0.3 | 0.086 | | Tatoeba-test.eng-war.eng.war | 11.4 | 0.415 | | Tatoeba-test.eng-wln.eng.wln | 8.4 | 0.218 | | Tatoeba-test.eng-wol.eng.wol | 11.5 | 0.252 | | Tatoeba-test.eng-xal.eng.xal | 0.1 | 0.007 | | Tatoeba-test.eng-xho.eng.xho | 19.5 | 0.552 | | Tatoeba-test.eng-yid.eng.yid | 4.0 | 0.256 | | Tatoeba-test.eng-yor.eng.yor | 8.8 | 0.247 | | Tatoeba-test.eng-zho.eng.zho | 21.8 | 0.192 | | Tatoeba-test.eng-zul.eng.zul | 34.3 | 0.655 | | Tatoeba-test.eng-zza.eng.zza | 0.5 | 0.080 | ### System Info: - hf_name: eng-mul - source_languages: eng - target_languages: mul - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-mul/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['en', 'ca', 'es', 'os', 'eo', 'ro', 'fy', 'cy', 'is', 'lb', 'su', 'an', 'sq', 'fr', 'ht', 'rm', 'cv', 'ig', 'am', 'eu', 'tr', 'ps', 'af', 'ny', 'ch', 'uk', 'sl', 'lt', 'tk', 'sg', 'ar', 'lg', 'bg', 'be', 'ka', 'gd', 'ja', 'si', 'br', 'mh', 'km', 'th', 'ty', 'rw', 'te', 'mk', 'or', 'wo', 'kl', 'mr', 'ru', 'yo', 'hu', 'fo', 'zh', 'ti', 'co', 'ee', 'oc', 'sn', 'mt', 'ts', 'pl', 'gl', 'nb', 'bn', 'tt', 'bo', 'lo', 'id', 'gn', 'nv', 'hy', 'kn', 'to', 'io', 'so', 'vi', 'da', 'fj', 'gv', 'sm', 'nl', 'mi', 'pt', 'hi', 'se', 'as', 'ta', 'et', 'kw', 'ga', 'sv', 'ln', 'na', 'mn', 'gu', 'wa', 'lv', 'jv', 'el', 'my', 'ba', 'it', 'hr', 'ur', 'ce', 'nn', 'fi', 'mg', 'rn', 'xh', 'ab', 'de', 'cs', 'he', 'zu', 'yi', 'ml', 'mul'] - src_constituents: {'eng'} - tgt_constituents: {'sjn_Latn', 'cat', 'nan', 'spa', 'ile_Latn', 'pap', 'mwl', 'uzb_Latn', 'mww', 'hil', 'lij', 'avk_Latn', 'lad_Latn', 'lat_Latn', 'bos_Latn', 'oss', 'epo', 'ron', 'fry', 'cym', 'toi_Latn', 'awa', 'swg', 'zsm_Latn', 'zho_Hant', 'gcf_Latn', 'uzb_Cyrl', 'isl', 'lfn_Latn', 'shs_Latn', 'nov_Latn', 'bho', 'ltz', 'lzh', 'kur_Latn', 'sun', 'arg', 'pes_Thaa', 'sqi', 'uig_Arab', 'csb_Latn', 'fra', 'hat', 'liv_Latn', 'non_Latn', 'sco', 'cmn_Hans', 'pnb', 'roh', 'chv', 'ibo', 'bul_Latn', 'amh', 'lfn_Cyrl', 'eus', 'fkv_Latn', 'tur', 'pus', 'afr', 'brx_Latn', 'nya', 'acm', 'ota_Latn', 'cha', 'ukr', 'xal', 'slv', 'lit', 'zho_Hans', 'tmw_Latn', 'kjh', 'ota_Arab', 'war', 'tuk', 'sag', 'myv', 'hsb', 'lzh_Hans', 'ara', 'tly_Latn', 'lug', 'brx', 'bul', 'bel', 'vol_Latn', 'kat', 'gan', 'got_Goth', 'vro', 'ext', 'afh_Latn', 'gla', 'jpn', 'udm', 'mai', 'ary', 'sin', 'tvl', 'hif_Latn', 'cjy_Hant', 'bre', 'ceb', 'mah', 'nob_Hebr', 'crh_Latn', 'prg_Latn', 'khm', 'ang_Latn', 'tha', 'tah', 'tzl', 'aln', 'kin', 'tel', 'ady', 'mkd', 'ori', 'wol', 'aze_Latn', 'jbo', 'niu', 'kal', 'mar', 'vie_Hani', 'arz', 'yue', 'kha', 'san_Deva', 'jbo_Latn', 'gos', 'hau_Latn', 'rus', 'quc', 'cmn', 'yor', 'hun', 'uig_Cyrl', 
'fao', 'mnw', 'zho', 'orv_Cyrl', 'iba', 'bel_Latn', 'tir', 'afb', 'crh', 'mic', 'cos', 'swh', 'sah', 'krl', 'ewe', 'apc', 'zza', 'chr', 'grc_Grek', 'tpw_Latn', 'oci', 'mfe', 'sna', 'kir_Cyrl', 'tat_Latn', 'gom', 'ido_Latn', 'sgs', 'pau', 'tgk_Cyrl', 'nog', 'mlt', 'pdc', 'tso', 'srp_Cyrl', 'pol', 'ast', 'glg', 'pms', 'fuc', 'nob', 'qya', 'ben', 'tat', 'kab', 'min', 'srp_Latn', 'wuu', 'dtp', 'jbo_Cyrl', 'tet', 'bod', 'yue_Hans', 'zlm_Latn', 'lao', 'ind', 'grn', 'nav', 'kaz_Cyrl', 'rom', 'hye', 'kan', 'ton', 'ido', 'mhr', 'scn', 'som', 'rif_Latn', 'vie', 'enm_Latn', 'lmo', 'npi', 'pes', 'dan', 'fij', 'ina_Latn', 'cjy_Hans', 'jdt_Cyrl', 'gsw', 'glv', 'khm_Latn', 'smo', 'umb', 'sma', 'gil', 'nld', 'snd_Arab', 'arq', 'mri', 'kur_Arab', 'por', 'hin', 'shy_Latn', 'sme', 'rap', 'tyv', 'dsb', 'moh', 'asm', 'lad', 'yue_Hant', 'kpv', 'tam', 'est', 'frm_Latn', 'hoc_Latn', 'bam_Latn', 'kek_Latn', 'ksh', 'tlh_Latn', 'ltg', 'pan_Guru', 'hnj_Latn', 'cor', 'gle', 'swe', 'lin', 'qya_Latn', 'kum', 'mad', 'cmn_Hant', 'fuv', 'nau', 'mon', 'akl_Latn', 'guj', 'kaz_Latn', 'wln', 'tuk_Latn', 'jav_Java', 'lav', 'jav', 'ell', 'frr', 'mya', 'bak', 'rue', 'ita', 'hrv', 'izh', 'ilo', 'dws_Latn', 'urd', 'stq', 'tat_Arab', 'haw', 'che', 'pag', 'nno', 'fin', 'mlg', 'ppl_Latn', 'run', 'xho', 'abk', 'deu', 'hoc', 'lkt', 'lld_Latn', 'tzl_Latn', 'mdf', 'ike_Latn', 'ces', 'ldn_Latn', 'egl', 'heb', 'vec', 'zul', 'max_Latn', 'pes_Latn', 'yid', 'mal', 'nds'} - src_multilingual: False - tgt_multilingual: True - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-mul/opus2m-2020-08-01.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-mul/opus2m-2020-08-01.test.txt - src_alpha3: eng - tgt_alpha3: mul - short_pair: en-mul - chrF2_score: 0.451 - bleu: 22.4 - brevity_penalty: 0.987 - ref_len: 68724.0 - src_name: English - tgt_name: Multiple languages - train_date: 2020-08-01 - src_alpha2: en - tgt_alpha2: mul - prefer_old: False - long_pair: eng-mul - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
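The card's note that "a sentence initial language token is required in the form of `>>id<<`" is easiest to see in code. Below is a minimal sketch, not part of the original card, of calling this model through the `transformers` MarianMT classes; the repository id `Helsinki-NLP/opus-mt-en-mul` and the example sentences and target codes are illustrative assumptions.

```python
# Hedged sketch: assumes the checkpoint is published as Helsinki-NLP/opus-mt-en-mul
# and loads through the standard MarianMT API in transformers.
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-mul"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# The target language is chosen with a sentence-initial >>id<< token, where id is
# any valid target-language ID from the list above (e.g. >>fra<< French, >>deu<< German).
src_texts = [">>fra<< How are you today?", ">>deu<< How are you today?"]
batch = tokenizer(src_texts, return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.batch_decode(translated, skip_special_tokens=True))
```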
24,199
[ [ -0.06005859375, -0.0430908203125, 0.0184783935546875, 0.03240966796875, -0.01016998291015625, -0.00855255126953125, 0.0096435546875, -0.036590576171875, 0.044525146484375, -0.005512237548828125, -0.034149169921875, -0.01099395751953125, -0.03277587890625, 0....
Yntec/CartoonStyleClassic
2023-08-07T13:00:22.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "Zovya", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Yntec
null
null
Yntec/CartoonStyleClassic
1
2,016
diffusers
2023-08-06T08:16:22
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image tags: - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image - Zovya --- # Cartoon Style Classic Preview and prompt: ![sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/PDayvf1iuFtO0QUM3m9jB.png) pretty cute little girl in tricycle, Screenshot of an surreal jean 70s round minimalist architecture, Sharp light, 35mm still from a sci fi blockbuster color movie made in 2022, beautiful portrait, Dorothy, set in 1860, in front of a spaceship that has just landed on an alien planet, are all wearing, a robot stands nearby This version of the model has the kl-f8-anime2 VAE baked in. Original page: https://civitai.com/models/33030/cartoon-style-classic
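The card gives a sample prompt but no loading code. Here is a minimal sketch that assumes only what the tags state (a `diffusers` `StableDiffusionPipeline` checkpoint); the dtype, device, and shortened prompt are illustrative choices, not from the original card.

```python
# Hedged sketch: assumes the standard diffusers StableDiffusionPipeline API;
# fp16 weights and a CUDA device are illustrative assumptions.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Yntec/CartoonStyleClassic", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

prompt = "pretty cute little girl in tricycle, 35mm still, beautiful portrait"
image = pipe(prompt).images[0]  # the baked-in kl-f8-anime2 VAE is used automatically
image.save("cartoon_style_classic.png")
```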
789
[ [ -0.04022216796875, -0.04718017578125, 0.03338623046875, 0.0038661956787109375, -0.0212554931640625, 0.0100860595703125, 0.0195159912109375, -0.01232147216796875, 0.0460205078125, 0.044281005859375, -0.057861328125, -0.0282745361328125, -0.0170745849609375, -...
heegyu/kodialogpt-v1
2022-11-22T08:29:51.000Z
[ "transformers", "pytorch", "gpt2", "text-generation", "license:cc-by-nc-sa-4.0", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
heegyu
null
null
heegyu/kodialogpt-v1
1
2,014
transformers
2022-11-06T00:18:43
--- license: cc-by-nc-sa-4.0 widget: - text: "0: 안녕하세요?\n1: 반갑습니다.\n0: 지금 뭐 하고 계세요?\n1: " --- A model obtained by fine-tuning [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on publicly available Korean dialogue datasets:<br/> - AIHub topic-based dialogue, Twitter, emotional dialogue, and SNS dialogue corpora - NIKL (National Institute of Korean Language) Modu Corpus online dialogue - My earlier [kodialogpt-v0](https://huggingface.co/heegyu/kodialogpt) used only about 80k AIHub topic-based dialogues, whereas this model was trained for one epoch on a total of 1.7 million dialogues. - Training code: https://github.com/HeegyuKim/open-domain-dialog<br/> ## Usage example ``` from transformers import pipeline generator = pipeline("text-generation", model="heegyu/kodialogpt-v1") generation_args = dict( repetition_penalty=1.3, no_repeat_ngram_size=4, eos_token_id=375, # \n max_new_tokens=32, do_sample=True, top_p=0.7, early_stopping=True ) generator( ["0 : **는 게임 좋아하니\n1 :", "0 : 어제 강남에서 살인사건 났대 ㅜㅜ 너무 무서워\n1 : 헐 왜? 무슨 일 있었어?\n0 : 사진보니까 막 피흘리는 사람있고 경찰들이 떠서 제압하고 난리도 아니었다던데??\n1 :", "0 : 자기야 어제는 나한테 왜 그랬어?\n1 : 뭔 일 있었어?\n0 : 어떻게 나한테 말도 없이 그럴 수 있어? 나 진짜 실망했어\n1 : "], **generation_args ) ``` Result ``` [[{'generated_text': '0 : **는 게임 좋아하니\n1 : 엉... 게임은 맨날 하는데 내일도 하겠지...? ᄏᄏ'}], [{'generated_text': '0 : 어제 강남에서 살인사건 났대 ㅜㅜ 너무 무서워\n1 : 헐 왜? 무슨 일 있었어?\n0 : 사진보니까 막 피흘리는 사람있고 경찰들이 떠서 제압하고 난리도 아니었다던데??\n1 : 와 대박이네... 그게 가능하다니.. 얼마나 무섭고 놀라울까..'}], [{'generated_text': '0 : 자기야 어제는 나한테 왜 그랬어?\n1 : 뭔 일 있었어?\n0 : 어떻게 나한테 말도 없이 그럴 수 있어? 나 진짜 실망했어\n1 : ᄏᄏ뭐가? 누가?'}]] ``` Hyperparameters used for training
1,385
[ [ -0.025360107421875, -0.0775146484375, 0.0288848876953125, 0.036163330078125, -0.04510498046875, 0.00330352783203125, 0.0023040771484375, -0.0195465087890625, 0.033721923828125, 0.032318115234375, -0.053955078125, -0.04974365234375, -0.042816162109375, -0.001...
timm/convnext_base.fb_in22k
2023-03-31T22:02:27.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-22k", "arxiv:2201.03545", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/convnext_base.fb_in22k
0
2,014
timm
2022-12-13T07:06:50
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-22k --- # Model card for convnext_base.fb_in22k A ConvNeXt image classification model. Pretrained on ImageNet-22k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 110.0 - GMACs: 15.4 - Activations (M): 28.8 - Image size: 224 x 224 - **Papers:** - A ConvNet for the 2020s: https://arxiv.org/abs/2201.03545 - **Original:** https://github.com/facebookresearch/ConvNeXt - **Dataset:** ImageNet-22k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('convnext_base.fb_in22k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_base.fb_in22k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_base.fb_in22k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). All timing numbers from eager model PyTorch 1.13 on RTX 3090 w/ AMP. 
| model |top1 |top5 |img_size|param_count|gmacs |macts |samples_per_sec|batch_size| |------------------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------|---------------|----------| | [convnextv2_huge.fcmae_ft_in22k_in1k_512](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_512) |88.848|98.742|512 |660.29 |600.81|413.07|28.58 |48 | | [convnextv2_huge.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_384) |88.668|98.738|384 |660.29 |337.96|232.35|50.56 |64 | | [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45|122.45 |256 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384) |88.312|98.578|384 |200.13 |101.11|126.74|196.84 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k_384) |88.196|98.532|384 |197.96 |101.1 |126.74|128.94 |128 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320) |87.968|98.47 |320 |200.13 |70.21 |88.02 |283.42 |256 | | [convnext_xlarge.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k_384) |87.75 |98.556|384 |350.2 |179.2 |168.99|124.85 |192 | | [convnextv2_base.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k_384) |87.646|98.422|384 |88.72 |45.21 |84.49 |209.51 |256 | | [convnext_large.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k_384) |87.476|98.382|384 |197.77 |101.1 |126.74|194.66 |256 | | [convnext_large_mlp.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_augreg_ft_in1k) |87.344|98.218|256 |200.13 |44.94 |56.33 |438.08 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k) |87.26 |98.248|224 |197.96 |34.4 |43.13 |376.84 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384) |87.138|98.212|384 |88.59 |45.21 |84.49 |365.47 |256 | | [convnext_xlarge.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k) |87.002|98.208|224 |350.2 |60.98 |57.5 |368.01 |256 | | [convnext_base.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k_384) |86.796|98.264|384 |88.59 |45.21 |84.49 |366.54 |256 | | [convnextv2_base.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k) |86.74 |98.022|224 |88.72 |15.38 |28.75 |624.23 |256 | | [convnext_large.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k) |86.636|98.028|224 |197.77 |34.4 |43.13 |581.43 |256 | | [convnext_base.clip_laiona_augreg_ft_in1k_384](https://huggingface.co/timm/convnext_base.clip_laiona_augreg_ft_in1k_384) |86.504|97.97 |384 |88.59 |45.21 |84.49 |368.14 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k) |86.344|97.97 |256 |88.59 |20.09 |37.55 |816.14 |256 | | [convnextv2_huge.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in1k) |86.256|97.75 |224 |660.29 |115.0 |79.07 |154.72 |256 | | 
[convnext_small.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_small.in12k_ft_in1k_384) |86.182|97.92 |384 |50.22 |25.58 |63.37 |516.19 |256 | | [convnext_base.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in1k) |86.154|97.68 |256 |88.59 |20.09 |37.55 |819.86 |256 | | [convnext_base.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k) |85.822|97.866|224 |88.59 |15.38 |28.75 |1037.66 |256 | | [convnext_small.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k_384) |85.778|97.886|384 |50.22 |25.58 |63.37 |518.95 |256 | | [convnextv2_large.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in1k) |85.742|97.584|224 |197.96 |34.4 |43.13 |375.23 |256 | | [convnext_small.in12k_ft_in1k](https://huggingface.co/timm/convnext_small.in12k_ft_in1k) |85.174|97.506|224 |50.22 |8.71 |21.56 |1474.31 |256 | | [convnext_tiny.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k_384) |85.118|97.608|384 |28.59 |13.14 |39.48 |856.76 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k_384) |85.112|97.63 |384 |28.64 |13.14 |39.48 |491.32 |256 | | [convnextv2_base.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in1k) |84.874|97.09 |224 |88.72 |15.38 |28.75 |625.33 |256 | | [convnext_small.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k) |84.562|97.394|224 |50.22 |8.71 |21.56 |1478.29 |256 | | [convnext_large.fb_in1k](https://huggingface.co/timm/convnext_large.fb_in1k) |84.282|96.892|224 |197.77 |34.4 |43.13 |584.28 |256 | | [convnext_tiny.in12k_ft_in1k](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k) |84.186|97.124|224 |28.59 |4.47 |13.44 |2433.7 |256 | | [convnext_tiny.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k_384) |84.084|97.14 |384 |28.59 |13.14 |39.48 |862.95 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k) |83.894|96.964|224 |28.64 |4.47 |13.44 |1452.72 |256 | | [convnext_base.fb_in1k](https://huggingface.co/timm/convnext_base.fb_in1k) |83.82 |96.746|224 |88.59 |15.38 |28.75 |1054.0 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k_384) |83.37 |96.742|384 |15.62 |7.22 |24.61 |801.72 |256 | | [convnext_small.fb_in1k](https://huggingface.co/timm/convnext_small.fb_in1k) |83.142|96.434|224 |50.22 |8.71 |21.56 |1464.0 |256 | | [convnextv2_tiny.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in1k) |82.92 |96.284|224 |28.64 |4.47 |13.44 |1425.62 |256 | | [convnext_tiny.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k) |82.898|96.616|224 |28.59 |4.47 |13.44 |2480.88 |256 | | [convnext_nano.in12k_ft_in1k](https://huggingface.co/timm/convnext_nano.in12k_ft_in1k) |82.282|96.344|224 |15.59 |2.46 |8.37 |3926.52 |256 | | [convnext_tiny_hnf.a2h_in1k](https://huggingface.co/timm/convnext_tiny_hnf.a2h_in1k) |82.216|95.852|224 |28.59 |4.47 |13.44 |2529.75 |256 | | [convnext_tiny.fb_in1k](https://huggingface.co/timm/convnext_tiny.fb_in1k) |82.066|95.854|224 |28.59 |4.47 |13.44 |2346.26 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k) |82.03 |96.166|224 |15.62 |2.46 |8.37 |2300.18 |256 | | [convnextv2_nano.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in1k) |81.83 
|95.738|224 |15.62 |2.46 |8.37 |2321.48 |256 | | [convnext_nano_ols.d1h_in1k](https://huggingface.co/timm/convnext_nano_ols.d1h_in1k) |80.866|95.246|224 |15.65 |2.65 |9.38 |3523.85 |256 | | [convnext_nano.d1h_in1k](https://huggingface.co/timm/convnext_nano.d1h_in1k) |80.768|95.334|224 |15.59 |2.46 |8.37 |3915.58 |256 | | [convnextv2_pico.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_pico.fcmae_ft_in1k) |80.304|95.072|224 |9.07 |1.37 |6.1 |3274.57 |256 | | [convnext_pico.d1_in1k](https://huggingface.co/timm/convnext_pico.d1_in1k) |79.526|94.558|224 |9.05 |1.37 |6.1 |5686.88 |256 | | [convnext_pico_ols.d1_in1k](https://huggingface.co/timm/convnext_pico_ols.d1_in1k) |79.522|94.692|224 |9.06 |1.43 |6.5 |5422.46 |256 | | [convnextv2_femto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_femto.fcmae_ft_in1k) |78.488|93.98 |224 |5.23 |0.79 |4.57 |4264.2 |256 | | [convnext_femto_ols.d1_in1k](https://huggingface.co/timm/convnext_femto_ols.d1_in1k) |77.86 |93.83 |224 |5.23 |0.82 |4.87 |6910.6 |256 | | [convnext_femto.d1_in1k](https://huggingface.co/timm/convnext_femto.d1_in1k) |77.454|93.68 |224 |5.22 |0.79 |4.57 |7189.92 |256 | | [convnextv2_atto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_atto.fcmae_ft_in1k) |76.664|93.044|224 |3.71 |0.55 |3.81 |4728.91 |256 | | [convnext_atto_ols.a2_in1k](https://huggingface.co/timm/convnext_atto_ols.a2_in1k) |75.88 |92.846|224 |3.7 |0.58 |4.11 |7963.16 |256 | | [convnext_atto.d2_in1k](https://huggingface.co/timm/convnext_atto.d2_in1k) |75.664|92.9 |224 |3.7 |0.55 |3.81 |8439.22 |256 | ## Citation ```bibtex @article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
15,601
[ [ -0.06658935546875, -0.03314208984375, -0.003753662109375, 0.038360595703125, -0.031646728515625, -0.01403045654296875, -0.0128326416015625, -0.03558349609375, 0.06536865234375, 0.01751708984375, -0.043670654296875, -0.041351318359375, -0.05035400390625, -0.0...
m3hrdadfi/bert-fa-base-uncased-wikitriplet-mean-tokens
2021-05-28T06:02:17.000Z
[ "transformers", "pytorch", "jax", "bert", "feature-extraction", "fa", "license:apache-2.0", "endpoints_compatible", "region:us" ]
feature-extraction
m3hrdadfi
null
null
m3hrdadfi/bert-fa-base-uncased-wikitriplet-mean-tokens
1
2,013
transformers
2022-03-02T23:29:05
--- language: fa license: apache-2.0 --- # ParsBERT + Sentence Transformers Please follow the [Sentence-Transformer](https://github.com/m3hrdadfi/sentence-transformers) repo for the latest information about previous and current models. ```bibtex @misc{SentenceTransformerWiki, author = {Mehrdad Farahani}, title = {Sentence Embeddings with ParsBERT}, year = {2020}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {https://github.com/m3hrdadfi/sentence-transformers}, } ```
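The card points to the training repo but omits a usage snippet. A minimal sketch follows, assuming the model loads through the standard `sentence-transformers` API with the mean-tokens pooling implied by its name; the Persian example sentences are illustrative.

```python
# Hedged sketch: assumes the checkpoint is compatible with the standard
# sentence-transformers loader; the example sentences are illustrative.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("m3hrdadfi/bert-fa-base-uncased-wikitriplet-mean-tokens")

sentences = [
    "این یک جمله نمونه است",  # "This is a sample sentence"
    "این جمله دیگری است",     # "This is another sentence"
]
embeddings = model.encode(sentences)  # one mean-pooled vector per sentence
print(embeddings.shape)  # (2, hidden_size)
```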
508
[ [ -0.00200653076171875, -0.056243896484375, 0.0435791015625, 0.041656494140625, -0.0236663818359375, -0.01169586181640625, -0.01296234130859375, -0.0104827880859375, 0.0194091796875, 0.04449462890625, -0.03387451171875, -0.0204925537109375, -0.0333251953125, 0...
timm/davit_base.msft_in1k
2023-01-27T21:47:54.000Z
[ "timm", "pytorch", "image-classification", "dataset:imagenet-1k", "arxiv:2204.03645", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/davit_base.msft_in1k
2
2,012
timm
2023-01-27T21:47:23
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for davit_base.msft_in1k A DaViT image classification model. Trained on ImageNet-1k by paper authors. Thanks to [Fredo Guan](https://github.com/fffffgggg54) for bringing the classification backbone to `timm`. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 88.0 - GMACs: 15.5 - Activations (M): 40.7 - Image size: 224 x 224 - **Papers:** - DaViT: Dual Attention Vision Transformers: https://arxiv.org/abs/2204.03645 - **Original:** https://github.com/dingmyu/davit - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open( urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png')) model = timm.create_model('davit_base.msft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open( urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png')) model = timm.create_model( 'davit_base.msft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 96, 56, 56]) # torch.Size([1, 192, 28, 28]) # torch.Size([1, 384, 14, 14]) # torch.Size([1, 768, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open( urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png')) model = timm.create_model( 'davit_base.msft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, i.e. a (batch_size, num_features, H, W) shaped tensor output = model.forward_head(output, pre_logits=True) # output is (batch_size, num_features) tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct|interpolation| |---------------------|------|--------|------|--------|-----------|--------|--------|-------------| |davit_base.msft_in1k |84.634|15.366 |97.014|2.986 |87.95 |224 |0.95 |bicubic | |davit_small.msft_in1k|84.25 |15.75 |96.94 |3.06 |49.75 |224 |0.95 |bicubic | |davit_tiny.msft_in1k 
|82.676|17.324 |96.276|3.724 |28.36 |224 |0.95 |bicubic | ## Citation ```bibtex @inproceedings{ding2022davit, title={DaViT: Dual Attention Vision Transformer}, author={Ding, Mingyu and Xiao, Bin and Codella, Noel and Luo, Ping and Wang, Jingdong and Yuan, Lu}, booktitle={ECCV}, year={2022}, } ```
4,047
[ [ -0.0408935546875, -0.036712646484375, 0.007244110107421875, 0.01209259033203125, -0.02716064453125, -0.01171875, -0.0094451904296875, -0.014801025390625, 0.015045166015625, 0.028961181640625, -0.04095458984375, -0.040740966796875, -0.05078125, -0.00697326660...
Hum-Works/lodestone-base-4096-v1
2023-10-26T22:00:30.000Z
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "mteb", "custom_code", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_title_body_jsonl", "dataset:flax-sentence-embeddings/stackexchange_titlebody_best_voted_answer_jsonl", "dataset...
sentence-similarity
Hum-Works
null
null
Hum-Works/lodestone-base-4096-v1
9
2,009
sentence-transformers
2023-08-25T16:33:26
--- license: apache-2.0 pipeline_tag: sentence-similarity inference: false tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb language: en datasets: - s2orc - flax-sentence-embeddings/stackexchange_title_body_jsonl - flax-sentence-embeddings/stackexchange_titlebody_best_voted_answer_jsonl - flax-sentence-embeddings/stackexchange_title_best_voted_answer_jsonl - flax-sentence-embeddings/stackexchange_titlebody_best_and_down_voted_answer_jsonl - sentence-transformers/reddit-title-body - msmarco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - natural_questions - trivia_qa - embedding-data/sentence-compression - embedding-data/flickr30k-captions - embedding-data/altlex - embedding-data/simple-wiki - embedding-data/QQP - embedding-data/SPECTER - embedding-data/PAQ_pairs - embedding-data/WikiAnswers - sentence-transformers/embedding-training-data model-index: - name: lodestone-base-4096-v1 results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 69.7313432835821 - type: ap value: 31.618259511417733 - type: f1 value: 63.30313825394228 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 86.89837499999999 - type: ap value: 82.39500885672128 - type: f1 value: 86.87317947399657 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.05 - type: f1 value: 42.67624383248947 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 26.173999999999996 - type: map_at_10 value: 40.976 - type: map_at_100 value: 42.067 - type: map_at_1000 value: 42.075 - type: map_at_3 value: 35.917 - type: map_at_5 value: 38.656 - type: mrr_at_1 value: 26.814 - type: mrr_at_10 value: 41.252 - type: mrr_at_100 value: 42.337 - type: mrr_at_1000 value: 42.345 - type: mrr_at_3 value: 36.226 - type: mrr_at_5 value: 38.914 - type: ndcg_at_1 value: 26.173999999999996 - type: ndcg_at_10 value: 49.819 - type: ndcg_at_100 value: 54.403999999999996 - type: ndcg_at_1000 value: 54.59 - type: ndcg_at_3 value: 39.231 - type: ndcg_at_5 value: 44.189 - type: precision_at_1 value: 26.173999999999996 - type: precision_at_10 value: 7.838000000000001 - type: precision_at_100 value: 0.9820000000000001 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 16.287 - type: precision_at_5 value: 12.191 - type: recall_at_1 value: 26.173999999999996 - type: recall_at_10 value: 78.378 - type: recall_at_100 value: 98.222 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 48.862 - type: recall_at_5 value: 60.953 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 42.31689035788179 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 31.280245136660984 - task: 
type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.79109720839415 - type: mrr value: 71.79615705931495 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 76.44918756608115 - type: cos_sim_spearman value: 70.86607256286257 - type: euclidean_pearson value: 74.12154678100815 - type: euclidean_spearman value: 70.86607256286257 - type: manhattan_pearson value: 74.0078626964417 - type: manhattan_spearman value: 70.68353828321327 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 75.40584415584415 - type: f1 value: 74.29514617572676 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 37.41860080664014 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 29.319217023090705 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 26.595000000000002 - type: map_at_10 value: 36.556 - type: map_at_100 value: 37.984 - type: map_at_1000 value: 38.134 - type: map_at_3 value: 33.417 - type: map_at_5 value: 35.160000000000004 - type: mrr_at_1 value: 32.761 - type: mrr_at_10 value: 41.799 - type: mrr_at_100 value: 42.526 - type: mrr_at_1000 value: 42.582 - type: mrr_at_3 value: 39.39 - type: mrr_at_5 value: 40.727000000000004 - type: ndcg_at_1 value: 32.761 - type: ndcg_at_10 value: 42.549 - type: ndcg_at_100 value: 47.915 - type: ndcg_at_1000 value: 50.475 - type: ndcg_at_3 value: 37.93 - type: ndcg_at_5 value: 39.939 - type: precision_at_1 value: 32.761 - type: precision_at_10 value: 8.312 - type: precision_at_100 value: 1.403 - type: precision_at_1000 value: 0.197 - type: precision_at_3 value: 18.741 - type: precision_at_5 value: 13.447999999999999 - type: recall_at_1 value: 26.595000000000002 - type: recall_at_10 value: 54.332 - type: recall_at_100 value: 76.936 - type: recall_at_1000 value: 93.914 - type: recall_at_3 value: 40.666000000000004 - type: recall_at_5 value: 46.513 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.528000000000002 - type: map_at_10 value: 30.751 - type: map_at_100 value: 31.855 - type: map_at_1000 value: 31.972 - type: map_at_3 value: 28.465 - type: map_at_5 value: 29.738 - type: mrr_at_1 value: 28.662 - type: mrr_at_10 value: 35.912 - type: mrr_at_100 value: 36.726 - type: mrr_at_1000 value: 36.777 - type: mrr_at_3 value: 34.013 - type: mrr_at_5 value: 35.156 - type: ndcg_at_1 value: 28.662 - type: ndcg_at_10 value: 35.452 - type: ndcg_at_100 value: 40.1 - type: ndcg_at_1000 value: 42.323 - type: ndcg_at_3 value: 32.112 - type: ndcg_at_5 value: 33.638 - type: precision_at_1 value: 28.662 - type: precision_at_10 value: 6.688 - type: precision_at_100 value: 1.13 - type: 
precision_at_1000 value: 0.16 - type: precision_at_3 value: 15.562999999999999 - type: precision_at_5 value: 11.019 - type: recall_at_1 value: 22.528000000000002 - type: recall_at_10 value: 43.748 - type: recall_at_100 value: 64.235 - type: recall_at_1000 value: 78.609 - type: recall_at_3 value: 33.937 - type: recall_at_5 value: 38.234 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 33.117999999999995 - type: map_at_10 value: 44.339 - type: map_at_100 value: 45.367000000000004 - type: map_at_1000 value: 45.437 - type: map_at_3 value: 41.195 - type: map_at_5 value: 42.922 - type: mrr_at_1 value: 38.37 - type: mrr_at_10 value: 47.786 - type: mrr_at_100 value: 48.522 - type: mrr_at_1000 value: 48.567 - type: mrr_at_3 value: 45.371 - type: mrr_at_5 value: 46.857 - type: ndcg_at_1 value: 38.37 - type: ndcg_at_10 value: 50.019999999999996 - type: ndcg_at_100 value: 54.36299999999999 - type: ndcg_at_1000 value: 55.897 - type: ndcg_at_3 value: 44.733000000000004 - type: ndcg_at_5 value: 47.292 - type: precision_at_1 value: 38.37 - type: precision_at_10 value: 8.288 - type: precision_at_100 value: 1.139 - type: precision_at_1000 value: 0.132 - type: precision_at_3 value: 20.293 - type: precision_at_5 value: 14.107 - type: recall_at_1 value: 33.117999999999995 - type: recall_at_10 value: 63.451 - type: recall_at_100 value: 82.767 - type: recall_at_1000 value: 93.786 - type: recall_at_3 value: 48.964999999999996 - type: recall_at_5 value: 55.358 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 16.028000000000002 - type: map_at_10 value: 23.186999999999998 - type: map_at_100 value: 24.236 - type: map_at_1000 value: 24.337 - type: map_at_3 value: 20.816000000000003 - type: map_at_5 value: 22.311 - type: mrr_at_1 value: 17.514 - type: mrr_at_10 value: 24.84 - type: mrr_at_100 value: 25.838 - type: mrr_at_1000 value: 25.924999999999997 - type: mrr_at_3 value: 22.542 - type: mrr_at_5 value: 24.04 - type: ndcg_at_1 value: 17.514 - type: ndcg_at_10 value: 27.391 - type: ndcg_at_100 value: 32.684999999999995 - type: ndcg_at_1000 value: 35.367 - type: ndcg_at_3 value: 22.820999999999998 - type: ndcg_at_5 value: 25.380999999999997 - type: precision_at_1 value: 17.514 - type: precision_at_10 value: 4.463 - type: precision_at_100 value: 0.745 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 10.019 - type: precision_at_5 value: 7.457999999999999 - type: recall_at_1 value: 16.028000000000002 - type: recall_at_10 value: 38.81 - type: recall_at_100 value: 63.295 - type: recall_at_1000 value: 83.762 - type: recall_at_3 value: 26.604 - type: recall_at_5 value: 32.727000000000004 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 11.962 - type: map_at_10 value: 17.218 - type: map_at_100 value: 18.321 - type: map_at_1000 value: 18.455 - type: map_at_3 value: 15.287999999999998 - type: map_at_5 value: 16.417 - type: mrr_at_1 value: 14.677000000000001 - type: mrr_at_10 value: 20.381 - type: mrr_at_100 value: 21.471999999999998 - type: mrr_at_1000 value: 21.566 - type: mrr_at_3 value: 18.448999999999998 - type: mrr_at_5 value: 19.587 - type: ndcg_at_1 value: 14.677000000000001 - type: ndcg_at_10 value: 20.86 - type: ndcg_at_100 value: 26.519 - 
type: ndcg_at_1000 value: 30.020000000000003 - type: ndcg_at_3 value: 17.208000000000002 - type: ndcg_at_5 value: 19.037000000000003 - type: precision_at_1 value: 14.677000000000001 - type: precision_at_10 value: 3.856 - type: precision_at_100 value: 0.7889999999999999 - type: precision_at_1000 value: 0.124 - type: precision_at_3 value: 8.043 - type: precision_at_5 value: 6.069999999999999 - type: recall_at_1 value: 11.962 - type: recall_at_10 value: 28.994999999999997 - type: recall_at_100 value: 54.071999999999996 - type: recall_at_1000 value: 79.309 - type: recall_at_3 value: 19.134999999999998 - type: recall_at_5 value: 23.727999999999998 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.764 - type: map_at_10 value: 31.744 - type: map_at_100 value: 33.037 - type: map_at_1000 value: 33.156 - type: map_at_3 value: 29.015 - type: map_at_5 value: 30.434 - type: mrr_at_1 value: 28.296 - type: mrr_at_10 value: 37.03 - type: mrr_at_100 value: 37.902 - type: mrr_at_1000 value: 37.966 - type: mrr_at_3 value: 34.568 - type: mrr_at_5 value: 35.786 - type: ndcg_at_1 value: 28.296 - type: ndcg_at_10 value: 37.289 - type: ndcg_at_100 value: 42.787 - type: ndcg_at_1000 value: 45.382 - type: ndcg_at_3 value: 32.598 - type: ndcg_at_5 value: 34.521 - type: precision_at_1 value: 28.296 - type: precision_at_10 value: 6.901 - type: precision_at_100 value: 1.135 - type: precision_at_1000 value: 0.152 - type: precision_at_3 value: 15.367 - type: precision_at_5 value: 11.03 - type: recall_at_1 value: 22.764 - type: recall_at_10 value: 48.807 - type: recall_at_100 value: 71.859 - type: recall_at_1000 value: 89.606 - type: recall_at_3 value: 35.594 - type: recall_at_5 value: 40.541 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 19.742 - type: map_at_10 value: 27.741 - type: map_at_100 value: 29.323 - type: map_at_1000 value: 29.438 - type: map_at_3 value: 25.217 - type: map_at_5 value: 26.583000000000002 - type: mrr_at_1 value: 24.657999999999998 - type: mrr_at_10 value: 32.407000000000004 - type: mrr_at_100 value: 33.631 - type: mrr_at_1000 value: 33.686 - type: mrr_at_3 value: 30.194 - type: mrr_at_5 value: 31.444 - type: ndcg_at_1 value: 24.657999999999998 - type: ndcg_at_10 value: 32.614 - type: ndcg_at_100 value: 39.61 - type: ndcg_at_1000 value: 42.114000000000004 - type: ndcg_at_3 value: 28.516000000000002 - type: ndcg_at_5 value: 30.274 - type: precision_at_1 value: 24.657999999999998 - type: precision_at_10 value: 6.176 - type: precision_at_100 value: 1.1400000000000001 - type: precision_at_1000 value: 0.155 - type: precision_at_3 value: 13.927 - type: precision_at_5 value: 9.954 - type: recall_at_1 value: 19.742 - type: recall_at_10 value: 42.427 - type: recall_at_100 value: 72.687 - type: recall_at_1000 value: 89.89 - type: recall_at_3 value: 30.781 - type: recall_at_5 value: 35.606 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 19.72608333333333 - type: map_at_10 value: 27.165333333333336 - type: map_at_100 value: 28.292499999999997 - type: map_at_1000 value: 28.416333333333327 - type: map_at_3 value: 24.783833333333334 - type: map_at_5 value: 26.101750000000003 - type: mrr_at_1 value: 23.721500000000002 - type: mrr_at_10 value: 
30.853333333333328 - type: mrr_at_100 value: 31.741750000000003 - type: mrr_at_1000 value: 31.812999999999995 - type: mrr_at_3 value: 28.732249999999997 - type: mrr_at_5 value: 29.945166666666665 - type: ndcg_at_1 value: 23.721500000000002 - type: ndcg_at_10 value: 31.74883333333333 - type: ndcg_at_100 value: 36.883583333333334 - type: ndcg_at_1000 value: 39.6145 - type: ndcg_at_3 value: 27.639583333333334 - type: ndcg_at_5 value: 29.543666666666667 - type: precision_at_1 value: 23.721500000000002 - type: precision_at_10 value: 5.709083333333333 - type: precision_at_100 value: 0.9859166666666666 - type: precision_at_1000 value: 0.1413333333333333 - type: precision_at_3 value: 12.85683333333333 - type: precision_at_5 value: 9.258166666666668 - type: recall_at_1 value: 19.72608333333333 - type: recall_at_10 value: 41.73583333333334 - type: recall_at_100 value: 64.66566666666668 - type: recall_at_1000 value: 84.09833333333336 - type: recall_at_3 value: 30.223083333333328 - type: recall_at_5 value: 35.153083333333335 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 17.582 - type: map_at_10 value: 22.803 - type: map_at_100 value: 23.503 - type: map_at_1000 value: 23.599999999999998 - type: map_at_3 value: 21.375 - type: map_at_5 value: 22.052 - type: mrr_at_1 value: 20.399 - type: mrr_at_10 value: 25.369999999999997 - type: mrr_at_100 value: 26.016000000000002 - type: mrr_at_1000 value: 26.090999999999998 - type: mrr_at_3 value: 23.952 - type: mrr_at_5 value: 24.619 - type: ndcg_at_1 value: 20.399 - type: ndcg_at_10 value: 25.964 - type: ndcg_at_100 value: 29.607 - type: ndcg_at_1000 value: 32.349 - type: ndcg_at_3 value: 23.177 - type: ndcg_at_5 value: 24.276 - type: precision_at_1 value: 20.399 - type: precision_at_10 value: 4.018 - type: precision_at_100 value: 0.629 - type: precision_at_1000 value: 0.093 - type: precision_at_3 value: 9.969 - type: precision_at_5 value: 6.748 - type: recall_at_1 value: 17.582 - type: recall_at_10 value: 33.35 - type: recall_at_100 value: 50.219 - type: recall_at_1000 value: 71.06099999999999 - type: recall_at_3 value: 25.619999999999997 - type: recall_at_5 value: 28.291 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 11.071 - type: map_at_10 value: 16.201999999999998 - type: map_at_100 value: 17.112 - type: map_at_1000 value: 17.238 - type: map_at_3 value: 14.508 - type: map_at_5 value: 15.440999999999999 - type: mrr_at_1 value: 13.833 - type: mrr_at_10 value: 19.235 - type: mrr_at_100 value: 20.108999999999998 - type: mrr_at_1000 value: 20.196 - type: mrr_at_3 value: 17.515 - type: mrr_at_5 value: 18.505 - type: ndcg_at_1 value: 13.833 - type: ndcg_at_10 value: 19.643 - type: ndcg_at_100 value: 24.298000000000002 - type: ndcg_at_1000 value: 27.614 - type: ndcg_at_3 value: 16.528000000000002 - type: ndcg_at_5 value: 17.991 - type: precision_at_1 value: 13.833 - type: precision_at_10 value: 3.6990000000000003 - type: precision_at_100 value: 0.713 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 7.9030000000000005 - type: precision_at_5 value: 5.891 - type: recall_at_1 value: 11.071 - type: recall_at_10 value: 27.019 - type: recall_at_100 value: 48.404 - type: recall_at_1000 value: 72.641 - type: recall_at_3 value: 18.336 - type: recall_at_5 value: 21.991 - task: type: Retrieval dataset: type: 
BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 18.573 - type: map_at_10 value: 25.008999999999997 - type: map_at_100 value: 26.015 - type: map_at_1000 value: 26.137 - type: map_at_3 value: 22.798 - type: map_at_5 value: 24.092 - type: mrr_at_1 value: 22.108 - type: mrr_at_10 value: 28.646 - type: mrr_at_100 value: 29.477999999999998 - type: mrr_at_1000 value: 29.57 - type: mrr_at_3 value: 26.415 - type: mrr_at_5 value: 27.693 - type: ndcg_at_1 value: 22.108 - type: ndcg_at_10 value: 29.42 - type: ndcg_at_100 value: 34.385 - type: ndcg_at_1000 value: 37.572 - type: ndcg_at_3 value: 25.274 - type: ndcg_at_5 value: 27.315 - type: precision_at_1 value: 22.108 - type: precision_at_10 value: 5.093 - type: precision_at_100 value: 0.859 - type: precision_at_1000 value: 0.124 - type: precision_at_3 value: 11.474 - type: precision_at_5 value: 8.321000000000002 - type: recall_at_1 value: 18.573 - type: recall_at_10 value: 39.433 - type: recall_at_100 value: 61.597 - type: recall_at_1000 value: 84.69 - type: recall_at_3 value: 27.849 - type: recall_at_5 value: 33.202999999999996 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.807 - type: map_at_10 value: 30.014000000000003 - type: map_at_100 value: 31.422 - type: map_at_1000 value: 31.652 - type: map_at_3 value: 27.447 - type: map_at_5 value: 28.711 - type: mrr_at_1 value: 27.668 - type: mrr_at_10 value: 34.489 - type: mrr_at_100 value: 35.453 - type: mrr_at_1000 value: 35.526 - type: mrr_at_3 value: 32.477000000000004 - type: mrr_at_5 value: 33.603 - type: ndcg_at_1 value: 27.668 - type: ndcg_at_10 value: 34.983 - type: ndcg_at_100 value: 40.535 - type: ndcg_at_1000 value: 43.747 - type: ndcg_at_3 value: 31.026999999999997 - type: ndcg_at_5 value: 32.608 - type: precision_at_1 value: 27.668 - type: precision_at_10 value: 6.837999999999999 - type: precision_at_100 value: 1.411 - type: precision_at_1000 value: 0.23600000000000002 - type: precision_at_3 value: 14.295 - type: precision_at_5 value: 10.435 - type: recall_at_1 value: 22.807 - type: recall_at_10 value: 43.545 - type: recall_at_100 value: 69.39800000000001 - type: recall_at_1000 value: 90.706 - type: recall_at_3 value: 32.183 - type: recall_at_5 value: 36.563 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 13.943 - type: map_at_10 value: 20.419999999999998 - type: map_at_100 value: 21.335 - type: map_at_1000 value: 21.44 - type: map_at_3 value: 17.865000000000002 - type: map_at_5 value: 19.36 - type: mrr_at_1 value: 15.712000000000002 - type: mrr_at_10 value: 22.345000000000002 - type: mrr_at_100 value: 23.227999999999998 - type: mrr_at_1000 value: 23.304 - type: mrr_at_3 value: 19.901 - type: mrr_at_5 value: 21.325 - type: ndcg_at_1 value: 15.712000000000002 - type: ndcg_at_10 value: 24.801000000000002 - type: ndcg_at_100 value: 29.799 - type: ndcg_at_1000 value: 32.513999999999996 - type: ndcg_at_3 value: 19.750999999999998 - type: ndcg_at_5 value: 22.252 - type: precision_at_1 value: 15.712000000000002 - type: precision_at_10 value: 4.1770000000000005 - type: precision_at_100 value: 0.738 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 8.688 - type: precision_at_5 value: 6.617000000000001 - type: recall_at_1 value: 13.943 - type: 
recall_at_10 value: 36.913000000000004 - type: recall_at_100 value: 60.519 - type: recall_at_1000 value: 81.206 - type: recall_at_3 value: 23.006999999999998 - type: recall_at_5 value: 29.082 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 9.468 - type: map_at_10 value: 16.029 - type: map_at_100 value: 17.693 - type: map_at_1000 value: 17.886 - type: map_at_3 value: 13.15 - type: map_at_5 value: 14.568 - type: mrr_at_1 value: 21.173000000000002 - type: mrr_at_10 value: 31.028 - type: mrr_at_100 value: 32.061 - type: mrr_at_1000 value: 32.119 - type: mrr_at_3 value: 27.534999999999997 - type: mrr_at_5 value: 29.431 - type: ndcg_at_1 value: 21.173000000000002 - type: ndcg_at_10 value: 23.224 - type: ndcg_at_100 value: 30.225 - type: ndcg_at_1000 value: 33.961000000000006 - type: ndcg_at_3 value: 18.174 - type: ndcg_at_5 value: 19.897000000000002 - type: precision_at_1 value: 21.173000000000002 - type: precision_at_10 value: 7.4719999999999995 - type: precision_at_100 value: 1.5010000000000001 - type: precision_at_1000 value: 0.219 - type: precision_at_3 value: 13.312 - type: precision_at_5 value: 10.619 - type: recall_at_1 value: 9.468 - type: recall_at_10 value: 28.823 - type: recall_at_100 value: 53.26499999999999 - type: recall_at_1000 value: 74.536 - type: recall_at_3 value: 16.672 - type: recall_at_5 value: 21.302 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 6.343 - type: map_at_10 value: 12.717 - type: map_at_100 value: 16.48 - type: map_at_1000 value: 17.381 - type: map_at_3 value: 9.568999999999999 - type: map_at_5 value: 11.125 - type: mrr_at_1 value: 48.75 - type: mrr_at_10 value: 58.425000000000004 - type: mrr_at_100 value: 59.075 - type: mrr_at_1000 value: 59.095 - type: mrr_at_3 value: 56.291999999999994 - type: mrr_at_5 value: 57.679 - type: ndcg_at_1 value: 37.875 - type: ndcg_at_10 value: 27.77 - type: ndcg_at_100 value: 30.288999999999998 - type: ndcg_at_1000 value: 36.187999999999995 - type: ndcg_at_3 value: 31.385999999999996 - type: ndcg_at_5 value: 29.923 - type: precision_at_1 value: 48.75 - type: precision_at_10 value: 22.375 - type: precision_at_100 value: 6.3420000000000005 - type: precision_at_1000 value: 1.4489999999999998 - type: precision_at_3 value: 35.5 - type: precision_at_5 value: 30.55 - type: recall_at_1 value: 6.343 - type: recall_at_10 value: 16.936 - type: recall_at_100 value: 35.955999999999996 - type: recall_at_1000 value: 55.787 - type: recall_at_3 value: 10.771 - type: recall_at_5 value: 13.669999999999998 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 41.99 - type: f1 value: 36.823402174564954 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 40.088 - type: map_at_10 value: 52.69200000000001 - type: map_at_100 value: 53.296 - type: map_at_1000 value: 53.325 - type: map_at_3 value: 49.905 - type: map_at_5 value: 51.617000000000004 - type: mrr_at_1 value: 43.009 - type: mrr_at_10 value: 56.203 - type: mrr_at_100 value: 56.75 - type: mrr_at_1000 value: 56.769000000000005 - type: mrr_at_3 value: 53.400000000000006 - type: mrr_at_5 value: 55.163 - type: ndcg_at_1 value: 43.009 - type: ndcg_at_10 value: 59.39 - 
type: ndcg_at_100 value: 62.129999999999995 - type: ndcg_at_1000 value: 62.793 - type: ndcg_at_3 value: 53.878 - type: ndcg_at_5 value: 56.887 - type: precision_at_1 value: 43.009 - type: precision_at_10 value: 8.366 - type: precision_at_100 value: 0.983 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 22.377 - type: precision_at_5 value: 15.035000000000002 - type: recall_at_1 value: 40.088 - type: recall_at_10 value: 76.68700000000001 - type: recall_at_100 value: 88.91 - type: recall_at_1000 value: 93.782 - type: recall_at_3 value: 61.809999999999995 - type: recall_at_5 value: 69.131 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test revision: None metrics: - type: map_at_1 value: 10.817 - type: map_at_10 value: 18.9 - type: map_at_100 value: 20.448 - type: map_at_1000 value: 20.660999999999998 - type: map_at_3 value: 15.979 - type: map_at_5 value: 17.415 - type: mrr_at_1 value: 23.148 - type: mrr_at_10 value: 31.208000000000002 - type: mrr_at_100 value: 32.167 - type: mrr_at_1000 value: 32.242 - type: mrr_at_3 value: 28.498 - type: mrr_at_5 value: 29.964000000000002 - type: ndcg_at_1 value: 23.148 - type: ndcg_at_10 value: 25.325999999999997 - type: ndcg_at_100 value: 31.927 - type: ndcg_at_1000 value: 36.081 - type: ndcg_at_3 value: 21.647 - type: ndcg_at_5 value: 22.762999999999998 - type: precision_at_1 value: 23.148 - type: precision_at_10 value: 7.546 - type: precision_at_100 value: 1.415 - type: precision_at_1000 value: 0.216 - type: precision_at_3 value: 14.969 - type: precision_at_5 value: 11.327 - type: recall_at_1 value: 10.817 - type: recall_at_10 value: 32.164 - type: recall_at_100 value: 57.655 - type: recall_at_1000 value: 82.797 - type: recall_at_3 value: 19.709 - type: recall_at_5 value: 24.333 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 25.380999999999997 - type: map_at_10 value: 33.14 - type: map_at_100 value: 33.948 - type: map_at_1000 value: 34.028000000000006 - type: map_at_3 value: 31.019999999999996 - type: map_at_5 value: 32.23 - type: mrr_at_1 value: 50.763000000000005 - type: mrr_at_10 value: 57.899 - type: mrr_at_100 value: 58.426 - type: mrr_at_1000 value: 58.457 - type: mrr_at_3 value: 56.093 - type: mrr_at_5 value: 57.116 - type: ndcg_at_1 value: 50.763000000000005 - type: ndcg_at_10 value: 41.656 - type: ndcg_at_100 value: 45.079 - type: ndcg_at_1000 value: 46.916999999999994 - type: ndcg_at_3 value: 37.834 - type: ndcg_at_5 value: 39.732 - type: precision_at_1 value: 50.763000000000005 - type: precision_at_10 value: 8.648 - type: precision_at_100 value: 1.135 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 23.105999999999998 - type: precision_at_5 value: 15.363 - type: recall_at_1 value: 25.380999999999997 - type: recall_at_10 value: 43.241 - type: recall_at_100 value: 56.745000000000005 - type: recall_at_1000 value: 69.048 - type: recall_at_3 value: 34.659 - type: recall_at_5 value: 38.406 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 79.544 - type: ap value: 73.82920133396664 - type: f1 value: 79.51048124883265 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 11.174000000000001 - type: map_at_10 value: 19.451999999999998 - 
type: map_at_100 value: 20.612 - type: map_at_1000 value: 20.703 - type: map_at_3 value: 16.444 - type: map_at_5 value: 18.083 - type: mrr_at_1 value: 11.447000000000001 - type: mrr_at_10 value: 19.808 - type: mrr_at_100 value: 20.958 - type: mrr_at_1000 value: 21.041999999999998 - type: mrr_at_3 value: 16.791 - type: mrr_at_5 value: 18.459 - type: ndcg_at_1 value: 11.447000000000001 - type: ndcg_at_10 value: 24.556 - type: ndcg_at_100 value: 30.637999999999998 - type: ndcg_at_1000 value: 33.14 - type: ndcg_at_3 value: 18.325 - type: ndcg_at_5 value: 21.278 - type: precision_at_1 value: 11.447000000000001 - type: precision_at_10 value: 4.215 - type: precision_at_100 value: 0.732 - type: precision_at_1000 value: 0.095 - type: precision_at_3 value: 8.052 - type: precision_at_5 value: 6.318 - type: recall_at_1 value: 11.174000000000001 - type: recall_at_10 value: 40.543 - type: recall_at_100 value: 69.699 - type: recall_at_1000 value: 89.403 - type: recall_at_3 value: 23.442 - type: recall_at_5 value: 30.536 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.6671226630187 - type: f1 value: 89.57660424361246 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 60.284997720018254 - type: f1 value: 40.30637400152823 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.33557498318763 - type: f1 value: 60.24039910680179 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.37390719569603 - type: f1 value: 72.33097333477316 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 34.68158939060552 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 30.340061711905236 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.01814326295803 - type: mrr value: 33.20555240055367 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 3.3910000000000005 - type: map_at_10 value: 7.7219999999999995 - type: map_at_100 value: 10.286 - type: map_at_1000 value: 11.668000000000001 - type: map_at_3 value: 5.552 - type: map_at_5 value: 6.468 - type: mrr_at_1 value: 34.365 - type: mrr_at_10 value: 42.555 - type: mrr_at_100 value: 43.295 - type: mrr_at_1000 value: 43.357 - type: mrr_at_3 value: 40.299 - type: mrr_at_5 value: 41.182 - type: ndcg_at_1 value: 31.424000000000003 - type: ndcg_at_10 value: 24.758 - type: ndcg_at_100 value: 23.677999999999997 - type: ndcg_at_1000 value: 33.377 - type: ndcg_at_3 value: 28.302 - type: 
ndcg_at_5 value: 26.342 - type: precision_at_1 value: 33.437 - type: precision_at_10 value: 19.256999999999998 - type: precision_at_100 value: 6.662999999999999 - type: precision_at_1000 value: 1.9900000000000002 - type: precision_at_3 value: 27.761000000000003 - type: precision_at_5 value: 23.715 - type: recall_at_1 value: 3.3910000000000005 - type: recall_at_10 value: 11.068 - type: recall_at_100 value: 25.878 - type: recall_at_1000 value: 60.19 - type: recall_at_3 value: 6.1690000000000005 - type: recall_at_5 value: 7.767 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None metrics: - type: map_at_1 value: 15.168000000000001 - type: map_at_10 value: 26.177 - type: map_at_100 value: 27.564 - type: map_at_1000 value: 27.628999999999998 - type: map_at_3 value: 22.03 - type: map_at_5 value: 24.276 - type: mrr_at_1 value: 17.439 - type: mrr_at_10 value: 28.205000000000002 - type: mrr_at_100 value: 29.357 - type: mrr_at_1000 value: 29.408 - type: mrr_at_3 value: 24.377 - type: mrr_at_5 value: 26.540000000000003 - type: ndcg_at_1 value: 17.41 - type: ndcg_at_10 value: 32.936 - type: ndcg_at_100 value: 39.196999999999996 - type: ndcg_at_1000 value: 40.892 - type: ndcg_at_3 value: 24.721 - type: ndcg_at_5 value: 28.615000000000002 - type: precision_at_1 value: 17.41 - type: precision_at_10 value: 6.199000000000001 - type: precision_at_100 value: 0.9690000000000001 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 11.790000000000001 - type: precision_at_5 value: 9.264 - type: recall_at_1 value: 15.168000000000001 - type: recall_at_10 value: 51.914 - type: recall_at_100 value: 79.804 - type: recall_at_1000 value: 92.75999999999999 - type: recall_at_3 value: 30.212 - type: recall_at_5 value: 39.204 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 67.306 - type: map_at_10 value: 80.634 - type: map_at_100 value: 81.349 - type: map_at_1000 value: 81.37299999999999 - type: map_at_3 value: 77.691 - type: map_at_5 value: 79.512 - type: mrr_at_1 value: 77.56 - type: mrr_at_10 value: 84.177 - type: mrr_at_100 value: 84.35000000000001 - type: mrr_at_1000 value: 84.353 - type: mrr_at_3 value: 83.003 - type: mrr_at_5 value: 83.799 - type: ndcg_at_1 value: 77.58 - type: ndcg_at_10 value: 84.782 - type: ndcg_at_100 value: 86.443 - type: ndcg_at_1000 value: 86.654 - type: ndcg_at_3 value: 81.67 - type: ndcg_at_5 value: 83.356 - type: precision_at_1 value: 77.58 - type: precision_at_10 value: 12.875 - type: precision_at_100 value: 1.503 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 35.63 - type: precision_at_5 value: 23.483999999999998 - type: recall_at_1 value: 67.306 - type: recall_at_10 value: 92.64 - type: recall_at_100 value: 98.681 - type: recall_at_1000 value: 99.79 - type: recall_at_3 value: 83.682 - type: recall_at_5 value: 88.424 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 50.76319866126382 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 55.024711941648995 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 
value: 3.9379999999999997 - type: map_at_10 value: 8.817 - type: map_at_100 value: 10.546999999999999 - type: map_at_1000 value: 10.852 - type: map_at_3 value: 6.351999999999999 - type: map_at_5 value: 7.453 - type: mrr_at_1 value: 19.400000000000002 - type: mrr_at_10 value: 27.371000000000002 - type: mrr_at_100 value: 28.671999999999997 - type: mrr_at_1000 value: 28.747 - type: mrr_at_3 value: 24.583 - type: mrr_at_5 value: 26.143 - type: ndcg_at_1 value: 19.400000000000002 - type: ndcg_at_10 value: 15.264 - type: ndcg_at_100 value: 22.63 - type: ndcg_at_1000 value: 28.559 - type: ndcg_at_3 value: 14.424999999999999 - type: ndcg_at_5 value: 12.520000000000001 - type: precision_at_1 value: 19.400000000000002 - type: precision_at_10 value: 7.8100000000000005 - type: precision_at_100 value: 1.854 - type: precision_at_1000 value: 0.329 - type: precision_at_3 value: 13.100000000000001 - type: precision_at_5 value: 10.68 - type: recall_at_1 value: 3.9379999999999997 - type: recall_at_10 value: 15.903 - type: recall_at_100 value: 37.645 - type: recall_at_1000 value: 66.86 - type: recall_at_3 value: 7.993 - type: recall_at_5 value: 10.885 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 80.12689060151425 - type: cos_sim_spearman value: 70.46515535094771 - type: euclidean_pearson value: 77.17160003557223 - type: euclidean_spearman value: 70.4651757047438 - type: manhattan_pearson value: 77.18129609281937 - type: manhattan_spearman value: 70.46610403752913 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 70.451157033355 - type: cos_sim_spearman value: 63.99899601697852 - type: euclidean_pearson value: 67.46985359967678 - type: euclidean_spearman value: 64.00001637764805 - type: manhattan_pearson value: 67.56534741780037 - type: manhattan_spearman value: 64.06533893575366 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 77.65086614464292 - type: cos_sim_spearman value: 78.20169706921848 - type: euclidean_pearson value: 77.77758172155283 - type: euclidean_spearman value: 78.20169706921848 - type: manhattan_pearson value: 77.75077884860052 - type: manhattan_spearman value: 78.16875216484164 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 76.26381598259717 - type: cos_sim_spearman value: 70.78377709313477 - type: euclidean_pearson value: 74.82646556532096 - type: euclidean_spearman value: 70.78377658155212 - type: manhattan_pearson value: 74.81784766108225 - type: manhattan_spearman value: 70.79351454692176 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 79.00532026789739 - type: cos_sim_spearman value: 80.02708383244838 - type: euclidean_pearson value: 79.48345422610525 - type: euclidean_spearman value: 80.02708383244838 - type: manhattan_pearson value: 79.44519739854803 - type: manhattan_spearman value: 79.98344094559687 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 
4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 77.32783048164805 - type: cos_sim_spearman value: 78.79729961288045 - type: euclidean_pearson value: 78.72111945793154 - type: euclidean_spearman value: 78.79729904606872 - type: manhattan_pearson value: 78.72464311117116 - type: manhattan_spearman value: 78.822591248334 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 82.04318630630854 - type: cos_sim_spearman value: 83.87886389259836 - type: euclidean_pearson value: 83.40385877895086 - type: euclidean_spearman value: 83.87886389259836 - type: manhattan_pearson value: 83.46337128901547 - type: manhattan_spearman value: 83.9723106941644 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 63.003511169944595 - type: cos_sim_spearman value: 64.39318805580227 - type: euclidean_pearson value: 65.4797990735967 - type: euclidean_spearman value: 64.39318805580227 - type: manhattan_pearson value: 65.44604544280844 - type: manhattan_spearman value: 64.38742899984233 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 76.63101237585029 - type: cos_sim_spearman value: 75.57446967644269 - type: euclidean_pearson value: 76.93491768734478 - type: euclidean_spearman value: 75.57446967644269 - type: manhattan_pearson value: 76.92187567800636 - type: manhattan_spearman value: 75.57239337194585 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 78.5376604868993 - type: mrr value: 92.94422897364073 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 38.872 - type: map_at_10 value: 50.417 - type: map_at_100 value: 51.202000000000005 - type: map_at_1000 value: 51.25999999999999 - type: map_at_3 value: 47.02 - type: map_at_5 value: 49.326 - type: mrr_at_1 value: 41.0 - type: mrr_at_10 value: 51.674 - type: mrr_at_100 value: 52.32599999999999 - type: mrr_at_1000 value: 52.376999999999995 - type: mrr_at_3 value: 48.778 - type: mrr_at_5 value: 50.744 - type: ndcg_at_1 value: 41.0 - type: ndcg_at_10 value: 56.027 - type: ndcg_at_100 value: 59.362 - type: ndcg_at_1000 value: 60.839 - type: ndcg_at_3 value: 50.019999999999996 - type: ndcg_at_5 value: 53.644999999999996 - type: precision_at_1 value: 41.0 - type: precision_at_10 value: 8.1 - type: precision_at_100 value: 0.987 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 20.444000000000003 - type: precision_at_5 value: 14.466999999999999 - type: recall_at_1 value: 38.872 - type: recall_at_10 value: 71.906 - type: recall_at_100 value: 86.367 - type: recall_at_1000 value: 98.0 - type: recall_at_3 value: 56.206 - type: recall_at_5 value: 65.05 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.7039603960396 - type: cos_sim_ap value: 
90.40809844250262 - type: cos_sim_f1 value: 84.53181583031557 - type: cos_sim_precision value: 87.56698821007502 - type: cos_sim_recall value: 81.69999999999999 - type: dot_accuracy value: 99.7039603960396 - type: dot_ap value: 90.40809844250262 - type: dot_f1 value: 84.53181583031557 - type: dot_precision value: 87.56698821007502 - type: dot_recall value: 81.69999999999999 - type: euclidean_accuracy value: 99.7039603960396 - type: euclidean_ap value: 90.4080982863383 - type: euclidean_f1 value: 84.53181583031557 - type: euclidean_precision value: 87.56698821007502 - type: euclidean_recall value: 81.69999999999999 - type: manhattan_accuracy value: 99.7 - type: manhattan_ap value: 90.39771161966652 - type: manhattan_f1 value: 84.32989690721648 - type: manhattan_precision value: 87.02127659574468 - type: manhattan_recall value: 81.8 - type: max_accuracy value: 99.7039603960396 - type: max_ap value: 90.40809844250262 - type: max_f1 value: 84.53181583031557 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 59.663210666678715 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 32.107791216468776 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 46.440691925067604 - type: mrr value: 47.03390257618199 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.067177519784074 - type: cos_sim_spearman value: 31.234728424648967 - type: dot_pearson value: 31.06717083018107 - type: dot_spearman value: 31.234728424648967 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.136 - type: map_at_10 value: 0.767 - type: map_at_100 value: 3.3689999999999998 - type: map_at_1000 value: 8.613999999999999 - type: map_at_3 value: 0.369 - type: map_at_5 value: 0.514 - type: mrr_at_1 value: 48.0 - type: mrr_at_10 value: 63.908 - type: mrr_at_100 value: 64.615 - type: mrr_at_1000 value: 64.615 - type: mrr_at_3 value: 62.0 - type: mrr_at_5 value: 63.4 - type: ndcg_at_1 value: 44.0 - type: ndcg_at_10 value: 38.579 - type: ndcg_at_100 value: 26.409 - type: ndcg_at_1000 value: 26.858999999999998 - type: ndcg_at_3 value: 47.134 - type: ndcg_at_5 value: 43.287 - type: precision_at_1 value: 48.0 - type: precision_at_10 value: 40.400000000000006 - type: precision_at_100 value: 26.640000000000004 - type: precision_at_1000 value: 12.04 - type: precision_at_3 value: 52.666999999999994 - type: precision_at_5 value: 46.800000000000004 - type: recall_at_1 value: 0.136 - type: recall_at_10 value: 1.0070000000000001 - type: recall_at_100 value: 6.318 - type: recall_at_1000 value: 26.522000000000002 - type: recall_at_3 value: 0.41700000000000004 - type: recall_at_5 value: 0.606 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 1.9949999999999999 - type: map_at_10 value: 8.304 - type: map_at_100 
value: 13.644 - type: map_at_1000 value: 15.43 - type: map_at_3 value: 4.788 - type: map_at_5 value: 6.22 - type: mrr_at_1 value: 22.448999999999998 - type: mrr_at_10 value: 37.658 - type: mrr_at_100 value: 38.491 - type: mrr_at_1000 value: 38.503 - type: mrr_at_3 value: 32.312999999999995 - type: mrr_at_5 value: 35.68 - type: ndcg_at_1 value: 21.429000000000002 - type: ndcg_at_10 value: 18.995 - type: ndcg_at_100 value: 32.029999999999994 - type: ndcg_at_1000 value: 44.852 - type: ndcg_at_3 value: 19.464000000000002 - type: ndcg_at_5 value: 19.172 - type: precision_at_1 value: 22.448999999999998 - type: precision_at_10 value: 17.143 - type: precision_at_100 value: 6.877999999999999 - type: precision_at_1000 value: 1.524 - type: precision_at_3 value: 21.769 - type: precision_at_5 value: 20.0 - type: recall_at_1 value: 1.9949999999999999 - type: recall_at_10 value: 13.395999999999999 - type: recall_at_100 value: 44.348 - type: recall_at_1000 value: 82.622 - type: recall_at_3 value: 5.896 - type: recall_at_5 value: 8.554 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 67.9394 - type: ap value: 12.943337263423334 - type: f1 value: 52.28243093094156 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 56.414827391058296 - type: f1 value: 56.666412409573105 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 47.009746255495465 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.02574953805807 - type: cos_sim_ap value: 67.66599910763128 - type: cos_sim_f1 value: 63.491277990844985 - type: cos_sim_precision value: 59.77172140694154 - type: cos_sim_recall value: 67.70448548812665 - type: dot_accuracy value: 84.02574953805807 - type: dot_ap value: 67.66600090945406 - type: dot_f1 value: 63.491277990844985 - type: dot_precision value: 59.77172140694154 - type: dot_recall value: 67.70448548812665 - type: euclidean_accuracy value: 84.02574953805807 - type: euclidean_ap value: 67.6659842364448 - type: euclidean_f1 value: 63.491277990844985 - type: euclidean_precision value: 59.77172140694154 - type: euclidean_recall value: 67.70448548812665 - type: manhattan_accuracy value: 84.0317100792752 - type: manhattan_ap value: 67.66351692448987 - type: manhattan_f1 value: 63.48610948306178 - type: manhattan_precision value: 57.11875131828729 - type: manhattan_recall value: 71.45118733509234 - type: max_accuracy value: 84.0317100792752 - type: max_ap value: 67.66600090945406 - type: max_f1 value: 63.491277990844985 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 87.53832421314084 - type: cos_sim_ap value: 83.11416594316626 - type: cos_sim_f1 value: 75.41118114347518 - type: cos_sim_precision value: 
73.12839059674504 - type: cos_sim_recall value: 77.8410840776101 - type: dot_accuracy value: 87.53832421314084 - type: dot_ap value: 83.11416226342155 - type: dot_f1 value: 75.41118114347518 - type: dot_precision value: 73.12839059674504 - type: dot_recall value: 77.8410840776101 - type: euclidean_accuracy value: 87.53832421314084 - type: euclidean_ap value: 83.11416284455395 - type: euclidean_f1 value: 75.41118114347518 - type: euclidean_precision value: 73.12839059674504 - type: euclidean_recall value: 77.8410840776101 - type: manhattan_accuracy value: 87.49369348391353 - type: manhattan_ap value: 83.08066812574694 - type: manhattan_f1 value: 75.36561228603892 - type: manhattan_precision value: 71.9202518363064 - type: manhattan_recall value: 79.15768401601478 - type: max_accuracy value: 87.53832421314084 - type: max_ap value: 83.11416594316626 - type: max_f1 value: 75.41118114347518 --- # lodestone-base-4096-v1 [Hum-Works/lodestone-base-4096-v1](https://huggingface.co/Hum-Works/lodestone-base-4096-v1). [Griffin McCauley](https://huggingface.co/gmccaul1), [Will Fortin](https://huggingface.co/willathum), [Dylan DiGioia](https://huggingface.co/dylanAtHum) 2023 This new [sentence-transformers](https://www.SBERT.net) model from [Hum](https://www.hum.works/) maps long sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Abstract In the hopes of furthering Hum's overarching mission of increasing the accessibility and interconnectivity of human knowledge, this model was developed as part of a project intending to boost the maximum input sequence length of sentence embedding models by leveraging recent architectural advances in the design of transformer models such as the incorporation of FlashAttention, Attention with Linear Biases (ALiBi), and Gated Linear Units (GLU). These modifications and enhancements were implemented by the team at MosaicML, who designed and constructed the pre-trained [`mosaic-bert-base-seqlen-2048`](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-2048) model, and more information regarding the details of their development and testing specifications can be found on that model card. While the fine-tuning procedure followed during the course of this project loosely mirrors that of the original [Flax-sentence-embeddings](https://huggingface.co/flax-sentence-embeddings) team responsible for the creation of many other popular sentence-transformers models (e.g. [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2), [all-distilroberta-v1](https://huggingface.co/sentence-transformers/all-distilroberta-v1), and [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)), our methodology includes novel techniques for data loading, batch sampling, and model checkpointing intended to improve training efficiency with regard to memory allocation and data storage. Through combining these well-established and proven fine-tuning practices with novel advances in transformer architectural elements, our `lodestone-base-4096-v1` model is able to achieve comparable performance metrics on standard text embedding evaluation benchmarks while also supporting a longer, more robust input sequence length of 4096 and retaining a smaller, more manageable size capable of being run on either a GPU or CPU. ## Usage Using this model is straightforward once you have [sentence-transformers](https://www.SBERT.net) installed.
*At the time of publishing, sentence-transformers does not support remote code, which is required for the FlashAttention implementation used by the model. A fork of the sentence-transformers repository that allows remote code execution is provided for convenience. It can be installed using the following commands:* ``` pip install git+https://github.com/Hum-Works/sentence-transformers.git pip install einops ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('Hum-Works/lodestone-base-4096-v1', trust_remote_code=True, revision='v1.0.0') sentences = ["This is an example sentence", "Each sentence is converted"] embeddings = model.encode(sentences) print(embeddings) ``` *Note: The model will use the OpenAI/Triton implementation of FlashAttention if installed. This is more performant than the fallback torch implementation. Some platforms and GPUs may not be supported by Triton; up-to-date compatibility can be found on [Triton's GitHub page](https://github.com/openai/triton#compatibility).* ------ ## Background The project aims to train sentence embedding models on very large sentence-level datasets using a self-supervised contrastive learning objective. We used the pretrained [`mosaic-bert-base-seqlen-2048`](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-2048) model and fine-tuned it on a dataset of nearly 1.5B sentence pairs. We use a contrastive learning objective: given a sentence from the pair, the model should predict which sentence, out of a set of randomly sampled other sentences, was actually paired with it in our dataset. ## Intended uses Our model is intended to be used as a long sentence and paragraph encoder. Given an input text, it outputs a vector containing the semantic information. The sentence vector may be used for information retrieval, clustering, or sentence similarity tasks. ## Training procedure ### Pre-training We use the pretrained [`mosaic-bert-base-seqlen-2048`](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-2048). Please refer to the model card for more detailed information about the pre-training procedure. ### Fine-tuning We fine-tune the model using a contrastive objective. Formally, we compute the dot product of each possible sentence pairing in the batch. We then apply the cross-entropy loss by comparing with the true pairs. #### Hyperparameters We trained our model on an ml.g5.4xlarge EC2 instance with 1 NVIDIA A10G Tensor Core GPU. We trained the model for 1.4 million steps using a batch size of 16. We used a learning rate warm-up of 500 steps. The sequence length during training was limited to 2048 tokens. We used the AdamW optimizer with a 2e-5 learning rate and weight decay of 0.01 (i.e. the default parameter values for SentenceTransformer.fit()). The full training script is accessible in this repository: `Training.py`. ## Model Architecture By incorporating FlashAttention, [Attention with Linear Biases (ALiBi)](https://arxiv.org/abs/2108.12409), and Gated Linear Units (GLU), this model is able to handle input sequences of 4096 tokens, 8x longer than those supported by most comparable sentence embedding models. The model was trained with a maximum sequence length of 2048, but the final model supports a maximum sequence length of 4096. This is accomplished by taking advantage of ALiBi's positional attention extrapolation, which has been shown to allow sequence lengths of 2x the initial trained length.
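To make the extrapolation claim above concrete, here is a minimal, illustrative sketch of how an ALiBi-style additive attention bias can be computed for a bidirectional encoder. This is not the model's actual implementation (which lives in the MosaicBERT code), and the slope formula shown is the paper's closed form for power-of-two head counts.

```python
import torch

def alibi_bias(num_heads: int, seq_len: int) -> torch.Tensor:
    """Additive attention bias of shape (num_heads, seq_len, seq_len)."""
    # Geometric, head-specific slopes from the ALiBi paper (exact for power-of-two head counts).
    slopes = torch.tensor([2.0 ** (-8.0 * (h + 1) / num_heads) for h in range(num_heads)])
    positions = torch.arange(seq_len)
    # Symmetric distance penalty, as used in bidirectional (encoder) variants of ALiBi.
    distance = (positions[None, :] - positions[:, None]).abs().float()
    return -slopes[:, None, None] * distance[None, :, :]
```

Because the bias is a simple linear function of token distance, it is defined for any sequence length, which is what lets the model run inference at 4096 tokens after training at 2048.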
## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 4096, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False}) (2): Normalize() ) ``` #### Training data We used the concatenation of multiple datasets to fine-tune our model. The total number of sentence pairs is nearly 1.5 billion. We sampled each dataset with a weighted probability proportional to its relative contribution to the entire dataset. The breakdown of the dataset can be seen below, and the entire dataset can be publicly accessed and loaded via the `Dataloading.ipynb` located within this repository. | Dataset | Paper | Number of training tuples | |--------------------------------------------------------|:----------------------------------------:|:--------------------------:| | [Reddit comments (2015-2018)](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 | | **[S2ORC](https://github.com/allenai/s2orc) Citation pairs (Abstracts)** | [paper](https://aclanthology.org/2020.acl-main.447/) | 252,102,397 | | **[Reddit posts](https://huggingface.co/datasets/sentence-transformers/reddit-title-body) (Title, Body) pairs** | - | 127,445,911 | | **[Amazon reviews (2018)](https://huggingface.co/datasets/sentence-transformers/embedding-training-data) (Title, Review) pairs** | - | 87,877,725 | | [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) Duplicate question pairs | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 | | [PAQ](https://github.com/facebookresearch/PAQ) (Question, Answer) pairs | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 | | [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Titles) | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 | | [S2ORC](https://github.com/allenai/s2orc) (Title, Abstract) | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_title_body_jsonl) (Title, Body) pairs | - | 25,368,423 | | [MS MARCO](https://microsoft.github.io/msmarco/) triplets | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 | | **[Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_title_best_voted_answer_jsonl) (Title, Most Upvoted Answer) pairs** | - | 4,784,250 | | **[Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_titlebody_best_voted_answer_jsonl) (Title+Body, Most Upvoted Answer) pairs** | - | 4,551,660 | | [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 | | **[Amazon QA](https://huggingface.co/datasets/sentence-transformers/embedding-training-data)** | - | 2,507,114 | | [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,375,067 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 | | **AG News (Title, Description) pairs of news articles** | - | 1,157,745 | |
[COCO](https://cocodataset.org/#home) Image captions | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395 | | [SPECTER](https://github.com/allenai/specter) citation triplets | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Question, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Question) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 | | **[CC News](https://huggingface.co/datasets/sentence-transformers/embedding-training-data) (Title, article) pairs** | - | 614,664 | | **[NPR](https://huggingface.co/datasets/sentence-transformers/embedding-training-data) (Title, Body) pairs** | - | 594,384 | | [SearchQA](https://huggingface.co/datasets/search_qa) | [paper](https://arxiv.org/abs/1704.05179) | 582,261 | | **[MS Marco](https://microsoft.github.io/msmarco/) (Query, Answer Passage) pairs** | [paper](https://doi.org/10.1145/3404835.3462804) | 532,751 | | [Stack Exchange](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=0) (Title, Body) pairs | - | 364,000 | | [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 | | [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 | | **[CNN & DailyMail](https://huggingface.co/datasets/sentence-transformers/embedding-training-data) (highlight sentences, article) pairs** | - | 311,971 | | [Stack Exchange](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=0) Duplicate questions (titles) | - | 304,524 | | AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/)) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 | | [Stack Exchange](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=0) Duplicate questions (bodies) | - | 250,518 | | [Stack Exchange](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=0) Duplicate questions (titles+bodies) | - | 250,459 | | **[XSUM](https://huggingface.co/datasets/sentence-transformers/embedding-training-data) (Summary, News Article) pairs** | - | 226,711 | | **[Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_titlebody_best_and_down_voted_answer_jsonl) (Title+Body, Most Upvoted Answer, Most Downvoted Answer) triplets** | - | 216,454 | | [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 | | **[FEVER](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=0) training data** | - | 139,051 | | [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 | | **[SearchQA](https://huggingface.co/datasets/search_qa) (Question, Top-Snippet)** | [paper](https://arxiv.org/abs/1704.05179) | 117,384 | | [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 | | **[Quora Question
Duplicates](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs)** | - | 103,663 | | [Quora Question Triplets](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 | | [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 | | [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 | | [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 | | [TriviaQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 | | **Total** | | **1,492,453,113** | #### Replication The entire fine-tuning process for this model can be replicated by following the steps outlined in the `Replication.txt` file within this repository. This document explains how to modify the [sentence-transformers](https://www.SBERT.net) library, configure the pre-trained [`mosaic-bert-base-seqlen-2048`](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-2048) model, load all of the training data, and execute the training script. #### Limitations Due to technical constraints (e.g. limited GPU memory capacity), this model was trained with a smaller batch size of 16, meaning each training step was less well-informed than it would have been on a higher-performance system. This smaller-than-ideal hyperparameter value generally makes the model more likely to get stuck in a local minimum and slows convergence of the parameter configuration to the optimum. In order to counteract this potential risk, we trained the model for a larger number of steps than many of its contemporaries to ensure a greater chance of achieving strong performance, but this is an area which could be improved if further fine-tuning were performed. It is also worth noting that, while this model is able to handle longer input sequences of up to 4096 word pieces, the training dataset used consists of sentence and paragraph pairs and triplets that do not necessarily reach that maximum sequence length. Since the data was not tailored specifically for this larger input size, further fine-tuning may be required to ensure highly accurate embeddings for longer texts of that magnitude. Finally, as stated on https://huggingface.co/datasets/sentence-transformers/reddit-title-body, an additional reminder and warning regarding the Reddit posts data is that one should "Be aware that this dataset is not filtered for biases, hate-speech, spam, racial slurs etc. It depicts the content as it is posted on Reddit." Thus, while we believe this has not induced any pathological behaviors in the model's performance, due to the relatively low prevalence of such records in the whole dataset of nearly 1.5B sentence pairs and the fact that this model was trained to produce semantic embeddings rather than generative text outputs, it is always important to be aware of vulnerabilities to bias.
79,945
[ [ -0.022369384765625, -0.06475830078125, 0.03656005859375, 0.027099609375, -0.0175628662109375, -0.0225372314453125, -0.0264892578125, -0.023193359375, 0.018585205078125, 0.0293731689453125, -0.041748046875, -0.0457763671875, -0.0538330078125, 0.01158142089843...
ai-forever/ruBert-large
2023-11-03T12:50:30.000Z
[ "transformers", "pytorch", "bert", "fill-mask", "PyTorch", "Transformers", "exbert", "ru", "arxiv:2309.10931", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
ai-forever
null
null
ai-forever/ruBert-large
7
2,007
transformers
2022-03-02T23:29:05
--- language: - ru tags: - PyTorch - Transformers - bert - exbert thumbnail: "https://github.com/sberbank-ai/model-zoo" pipeline_tag: fill-mask --- # ruBert-large The model architecture design, pretraining, and evaluation are documented in our preprint: [**A Family of Pretrained Transformer Language Models for Russian**](https://arxiv.org/abs/2309.10931). The model was pretrained by the [SberDevices](https://sberdevices.ru/) team. * Task: `mask filling` * Type: `encoder` * Tokenizer: `BPE` * Dict size: `120 138` * Num Parameters: `427 M` * Training Data Volume: `30 GB` # Authors + NLP core team RnD [Telegram channel](https://t.me/nlpcoreteam): + Dmitry Zmitrovich # Cite us ``` @misc{zmitrovich2023family, title={A Family of Pretrained Transformer Language Models for Russian}, author={Dmitry Zmitrovich and Alexander Abramov and Andrey Kalmykov and Maria Tikhonova and Ekaterina Taktasheva and Danil Astafurov and Mark Baushenko and Artem Snegirev and Tatiana Shavrina and Sergey Markov and Vladislav Mikhailov and Alena Fenogenova}, year={2023}, eprint={2309.10931}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
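Since the card above ships no usage snippet, here is a minimal fill-mask sketch. It is an illustrative assumption on our part: it relies only on the standard `transformers` pipeline API and a BERT-style `[MASK]` token, and the example sentence is hypothetical.

```python
from transformers import pipeline

# Illustrative sketch: standard fill-mask pipeline with a BERT-style [MASK] token.
fill_mask = pipeline("fill-mask", model="ai-forever/ruBert-large")
for prediction in fill_mask("Столица России - [MASK]."):
    print(prediction["token_str"], round(prediction["score"], 3))
```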
1,170
[ [ -0.03070068359375, -0.01235198974609375, 0.015625, 0.01476287841796875, -0.02935791015625, -0.0024566650390625, -0.0311737060546875, -0.0150604248046875, -0.0069427490234375, 0.026580810546875, -0.0360107421875, -0.023345947265625, -0.046966552734375, -0.006...
aubmindlab/bert-base-arabertv01
2023-06-09T12:24:20.000Z
[ "transformers", "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "ar", "dataset:wikipedia", "dataset:OSIAN", "dataset:1.5B_Arabic_Corpus", "arxiv:2003.00104", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
aubmindlab
null
null
aubmindlab/bert-base-arabertv01
1
2,006
transformers
2022-03-02T23:29:05
--- language: ar datasets: - wikipedia - OSIAN - 1.5B_Arabic_Corpus widget: - text: " عاصمة لبنان هي [MASK] ." --- # !!! A newer version of this model is available !!! [AraBERTv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) # AraBERT v1 & v2: Pre-training BERT for Arabic Language Understanding <img src="https://raw.githubusercontent.com/aub-mind/arabert/master/arabert_logo.png" width="100" align="left"/> **AraBERT** is an Arabic pretrained language model based on [Google's BERT architecture](https://github.com/google-research/bert). AraBERT uses the same BERT-Base config. More details are available in the [AraBERT Paper](https://arxiv.org/abs/2003.00104) and in the [AraBERT Meetup](https://github.com/WissamAntoun/pydata_khobar_meetup) There are two versions of the model, AraBERTv0.1 and AraBERTv1, with the difference being that AraBERTv1 uses pre-segmented text where prefixes and suffixes were split using the [Farasa Segmenter](http://alt.qcri.org/farasa/segmenter.html). We evaluate AraBERT models on different downstream tasks and compare them to [mBERT](https://github.com/google-research/bert/blob/master/multilingual.md) and other state-of-the-art models (*to the best of our knowledge*). The tasks were Sentiment Analysis on 6 different datasets ([HARD](https://github.com/elnagara/HARD-Arabic-Dataset), [ASTD-Balanced](https://www.aclweb.org/anthology/D15-1299), [ArsenTD-Lev](https://staff.aub.edu.lb/~we07/Publications/ArSentD-LEV_Sentiment_Corpus.pdf), [LABR](https://github.com/mohamedadaly/LABR)), Named Entity Recognition with the [ANERcorp](http://curtis.ml.cmu.edu/w/courses/index.php/ANERcorp), and Arabic Question Answering on [Arabic-SQuAD and ARCD](https://github.com/husseinmozannar/SOQAL) # AraBERTv2 ## What's New! AraBERT now comes in 4 new variants to replace the old v1 versions: More detail is available in the AraBERT folder, in the [README](https://github.com/aub-mind/arabert/blob/master/AraBERT/README.md), and in the [AraBERT Paper](https://arxiv.org/abs/2003.00104v2) Model | HuggingFace Model Name | Size (MB/Params) | Pre-Segmentation | DataSet (Sentences/Size/nWords) | ---|:---:|:---:|:---:|:---: AraBERTv0.2-base | [bert-base-arabertv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) | 543MB / 136M | No | 200M / 77GB / 8.6B | AraBERTv0.2-large | [bert-large-arabertv02](https://huggingface.co/aubmindlab/bert-large-arabertv02) | 1.38GB / 371M | No | 200M / 77GB / 8.6B | AraBERTv2-base | [bert-base-arabertv2](https://huggingface.co/aubmindlab/bert-base-arabertv2) | 543MB / 136M | Yes | 200M / 77GB / 8.6B | AraBERTv2-large | [bert-large-arabertv2](https://huggingface.co/aubmindlab/bert-large-arabertv2) | 1.38GB / 371M | Yes | 200M / 77GB / 8.6B | AraBERTv0.1-base | [bert-base-arabertv01](https://huggingface.co/aubmindlab/bert-base-arabertv01) | 543MB / 136M | No | 77M / 23GB / 2.7B | AraBERTv1-base | [bert-base-arabert](https://huggingface.co/aubmindlab/bert-base-arabert) | 543MB / 136M | Yes | 77M / 23GB / 2.7B | All models are available on the `HuggingFace` model page under the [aubmindlab](https://huggingface.co/aubmindlab/) name. Checkpoints are available in PyTorch, TF2 and TF1 formats. ## Better Pre-Processing and New Vocab We identified an issue with AraBERTv1's wordpiece vocabulary. The issue came from punctuation and numbers that were still attached to words when the wordpiece vocabulary was learned. We now insert a space between numbers and characters and around punctuation characters.
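As a rough illustration of that space-insertion rule, consider the sketch below. These regexes are only an approximation; the authoritative logic lives in `arabert.preprocess.ArabertPreprocessor`, shown in the Preprocessing section below.

```python
import re

def add_spaces(text: str) -> str:
    # Illustrative approximation of the vocab-fix rule described above.
    text = re.sub(r"(\d)([^\W\d_])", r"\1 \2", text)  # space between a digit and a following letter
    text = re.sub(r"([^\W\d_])(\d)", r"\1 \2", text)  # space between a letter and a following digit
    text = re.sub(r"([?.!,;:])", r" \1 ", text)       # space around common punctuation marks
    return re.sub(r"\s+", " ", text).strip()

print(add_spaces("price:100EGP, great!"))  # -> "price : 100 EGP , great !"
```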
The new vocabulary was learned using the `BertWordpieceTokenizer` from the `tokenizers` library, and should now support the Fast tokenizer implementation from the `transformers` library. **P.S.**: All the old BERT code should work with the new BERT; just change the model name and check the new preprocessing function. **Please read the section on how to use the [preprocessing function](#Preprocessing)** ## Bigger Dataset and More Compute We used ~3.5 times more data, and trained for longer. For dataset sources, see the [Dataset Section](#Dataset). Model | Hardware | num of examples with seq len (128 / 512) | 128 (Batch Size / Num of Steps) | 512 (Batch Size / Num of Steps) | Total Steps | Total Time (in Days) | ---|:---:|:---:|:---:|:---:|:---:|:---: AraBERTv0.2-base | TPUv3-8 | 420M / 207M | 2560 / 1M | 384 / 2M | 3M | - AraBERTv0.2-large | TPUv3-128 | 420M / 207M | 13440 / 250K | 2056 / 300K | 550K | - AraBERTv2-base | TPUv3-8 | 520M / 245M | 13440 / 250K | 2056 / 300K | 550K | - AraBERTv2-large | TPUv3-128 | 520M / 245M | 13440 / 250K | 2056 / 300K | 550K | - AraBERT-base (v1/v0.1) | TPUv2-8 | - | 512 / 900K | 128 / 300K | 1.2M | 4 days # Dataset The pretraining data used for the new AraBERT model is also used for Arabic **GPT2 and ELECTRA**. The dataset consists of 77GB or 200,095,961 lines or 8,655,948,860 words or 82,232,988,358 chars (before applying Farasa Segmentation). For the new dataset we added the unshuffled OSCAR corpus, after thoroughly filtering it, to the previous dataset used in AraBERTv1 but without the websites that we previously crawled: - OSCAR unshuffled and filtered. - [Arabic Wikipedia dump](https://archive.org/details/arwiki-20190201) from 2020/09/01 - [The 1.5B words Arabic Corpus](https://www.semanticscholar.org/paper/1.5-billion-words-Arabic-Corpus-El-Khair/f3eeef4afb81223df96575adadf808fe7fe440b4) - [The OSIAN Corpus](https://www.aclweb.org/anthology/W19-4619) - Assafir news articles. Huge thank you to Assafir for giving us the data # Preprocessing It is recommended to apply our preprocessing function before training/testing on any dataset. **Install farasapy to segment text for AraBERT v1 & v2: `pip install farasapy`** ```python from arabert.preprocess import ArabertPreprocessor model_name="bert-base-arabertv01" arabert_prep = ArabertPreprocessor(model_name=model_name) text = "ولن نبالغ إذا قلنا إن هاتف أو كمبيوتر المكتب في زمننا هذا ضروري" arabert_prep.preprocess(text) ``` ## Accepted_models ``` bert-base-arabertv01 bert-base-arabert bert-base-arabertv02 bert-base-arabertv2 bert-large-arabertv02 bert-large-arabertv2 araelectra-base aragpt2-base aragpt2-medium aragpt2-large aragpt2-mega ``` # TensorFlow 1.x models The TF1.x models are available in the HuggingFace models repo. You can download them as follows: - via git-lfs: clone all the models in a repo ```bash curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash sudo apt-get install git-lfs git lfs install git clone https://huggingface.co/aubmindlab/MODEL_NAME tar -C ./MODEL_NAME -zxvf /content/MODEL_NAME/tf1_model.tar.gz ``` where `MODEL_NAME` is any model under the `aubmindlab` name - via `wget`: - Go to the tf1_model.tar.gz file on huggingface.co/models/aubmindlab/MODEL_NAME.
- copy the `oid sha256` - then run `wget https://cdn-lfs.huggingface.co/aubmindlab/aragpt2-base/INSERT_THE_SHA_HERE` (ex: for `aragpt2-base`: `wget https://cdn-lfs.huggingface.co/aubmindlab/aragpt2-base/3766fc03d7c2593ff2fb991d275e96b81b0ecb2098b71ff315611d052ce65248`) # If you used this model please cite us as: Google Scholar has our BibTeX wrong (missing name), use this instead ``` @inproceedings{antoun2020arabert, title={AraBERT: Transformer-based Model for Arabic Language Understanding}, author={Antoun, Wissam and Baly, Fady and Hajj, Hazem}, booktitle={LREC 2020 Workshop Language Resources and Evaluation Conference 11--16 May 2020}, pages={9} } ``` # Acknowledgments Thanks to TensorFlow Research Cloud (TFRC) for the free access to Cloud TPUs (we couldn't have done it without this program), and to the [AUB MIND Lab](https://sites.aub.edu.lb/mindlab/) Members for the continuous support. Also thanks to [Yakshof](https://www.yakshof.com/#/) and Assafir for data and storage access. Another thanks to Habib Rahal (https://www.behance.net/rahalhabib), for putting a face to AraBERT. # Contacts **Wissam Antoun**: [Linkedin](https://www.linkedin.com/in/wissam-antoun-622142b4/) | [Twitter](https://twitter.com/wissam_antoun) | [Github](https://github.com/WissamAntoun) | <wfa07@mail.aub.edu> | <wissam.antoun@gmail.com> **Fady Baly**: [Linkedin](https://www.linkedin.com/in/fadybaly/) | [Twitter](https://twitter.com/fadybaly) | [Github](https://github.com/fadybaly) | <fgb06@mail.aub.edu> | <baly.fady@gmail.com>
8,306
[ [ -0.052276611328125, -0.05267333984375, 0.02410888671875, 0.00913238525390625, -0.023193359375, -0.007572174072265625, -0.01146697998046875, -0.04437255859375, 0.0180816650390625, 0.021331787109375, -0.04302978515625, -0.049285888671875, -0.05755615234375, 0....
harshit345/xlsr-wav2vec-speech-emotion-recognition
2021-12-12T20:53:33.000Z
[ "transformers", "pytorch", "wav2vec2", "audio", "audio-classification", "speech", "en", "dataset:aesdd", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
audio-classification
harshit345
null
null
harshit345/xlsr-wav2vec-speech-emotion-recognition
52
2,005
transformers
2022-03-02T23:29:05
--- language: en datasets: - aesdd tags: - audio - audio-classification - speech license: apache-2.0 --- ~~~ # requirement packages !pip install git+https://github.com/huggingface/datasets.git !pip install git+https://github.com/huggingface/transformers.git !pip install torchaudio !pip install librosa ~~~ # setup ~~~ import torch import torch.nn as nn import torch.nn.functional as F import torchaudio from transformers import AutoConfig, Wav2Vec2FeatureExtractor import librosa import IPython.display as ipd import numpy as np import pandas as pd ~~~ ~~~ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model_name_or_path = "harshit345/xlsr-wav2vec-speech-emotion-recognition" config = AutoConfig.from_pretrained(model_name_or_path) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name_or_path) sampling_rate = feature_extractor.sampling_rate # Wav2Vec2ForSpeechClassification is a custom classification head (not part of transformers); see the Colab notebook linked below for its definition. model = Wav2Vec2ForSpeechClassification.from_pretrained(model_name_or_path).to(device) ~~~ ~~~ def speech_file_to_array_fn(path, sampling_rate): speech_array, _sampling_rate = torchaudio.load(path) # resample to the rate the feature extractor expects resampler = torchaudio.transforms.Resample(_sampling_rate, sampling_rate) speech = resampler(speech_array).squeeze().numpy() return speech def predict(path, sampling_rate): speech = speech_file_to_array_fn(path, sampling_rate) inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True) inputs = {key: inputs[key].to(device) for key in inputs} with torch.no_grad(): logits = model(**inputs).logits scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0] outputs = [{"Emotion": config.id2label[i], "Score": f"{round(score * 100, 3):.1f}%"} for i, score in enumerate(scores)] return outputs ~~~ # prediction ~~~ # path for a sample path = '/data/jtes_v1.1/wav/f01/ang/f01_ang_01.wav' outputs = predict(path, sampling_rate) ~~~ ~~~ [{'Emotion': 'anger', 'Score': '78.3%'}, {'Emotion': 'disgust', 'Score': '11.7%'}, {'Emotion': 'fear', 'Score': '5.4%'}, {'Emotion': 'happiness', 'Score': '4.1%'}, {'Emotion': 'sadness', 'Score': '0.5%'}] ~~~ ## Evaluation The following table summarizes the scores obtained by the model overall and per class. | Emotions | precision | recall | f1-score | accuracy | |-----------|-----------|--------|----------|----------| | anger | 0.82 | 1.00 | 0.81 | | | disgust | 0.85 | 0.96 | 0.85 | | | fear | 0.78 | 0.88 | 0.80 | | | happiness | 0.84 | 0.71 | 0.78 | | | sadness | 0.86 | 1.00 | 0.79 | | | | | | Overall | 0.806 | ## Colab Notebook https://colab.research.google.com/drive/1aPPb_ZVS5dlFVZySly8Q80a44La1XjJu?usp=sharing
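Note that the loading snippet above references `Wav2Vec2ForSpeechClassification` without defining it; it is a custom head class from the Colab notebook linked above, not part of `transformers`. A minimal sketch of such a head is shown below; the layer names and details are illustrative and may not match the actual checkpoint.

~~~
import torch.nn as nn
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2Model, Wav2Vec2PreTrainedModel

class Wav2Vec2ForSpeechClassification(Wav2Vec2PreTrainedModel):
    # Illustrative sketch only - the authoritative definition lives in the linked Colab notebook.
    def __init__(self, config):
        super().__init__(config)
        self.wav2vec2 = Wav2Vec2Model(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_values, attention_mask=None):
        hidden_states = self.wav2vec2(input_values, attention_mask=attention_mask).last_hidden_state
        pooled = hidden_states.mean(dim=1)  # mean-pool over the time axis
        return SequenceClassifierOutput(logits=self.classifier(pooled))
~~~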
2,786
[ [ -0.040008544921875, -0.046173095703125, 0.005695343017578125, 0.029144287109375, -0.0015811920166015625, -0.0093231201171875, -0.022857666015625, -0.0299224853515625, 0.0269622802734375, 0.01068115234375, -0.046966552734375, -0.0479736328125, -0.0489501953125, ...
timm/efficientnet_lite0.ra_in1k
2023-04-27T21:12:19.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:1905.11946", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/efficientnet_lite0.ra_in1k
0
2,004
timm
2022-12-12T23:58:23
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for efficientnet_lite0.ra_in1k An EfficientNet-Lite image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below. Recipe details: * RandAugment `RA` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). * RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging * Step (exponential decay w/ staircase) LR schedule with warmup ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 4.7 - GMACs: 0.4 - Activations (M): 6.7 - Image size: 224 x 224 - **Papers:** - EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks: https://arxiv.org/abs/1905.11946 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('efficientnet_lite0.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'efficientnet_lite0.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 16, 112, 112]) # torch.Size([1, 24, 56, 56]) # torch.Size([1, 40, 28, 28]) # torch.Size([1, 112, 14, 14]) # torch.Size([1, 320, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'efficientnet_lite0.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1280, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, 
num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{tan2019efficientnet, title={Efficientnet: Rethinking model scaling for convolutional neural networks}, author={Tan, Mingxing and Le, Quoc}, booktitle={International conference on machine learning}, pages={6105--6114}, year={2019}, organization={PMLR} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
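The classification snippet above stops at raw class indices. A small follow-on sketch for mapping them to label names; `imagenet_classes.txt` is a hypothetical local file with one ImageNet-1k label per line (it is not shipped with this repo):

```python
# Continues from the Image Classification example above.
with open("imagenet_classes.txt") as f:  # hypothetical label file
    labels = [line.strip() for line in f]

for prob, idx in zip(top5_probabilities[0], top5_class_indices[0]):
    print(f"{labels[int(idx)]}: {prob.item():.2f}%")
```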
4,721
[ [ -0.02685546875, -0.0369873046875, -0.00585174560546875, 0.0008687973022460938, -0.01544189453125, -0.037353515625, -0.0250091552734375, -0.0264739990234375, 0.0229949951171875, 0.031707763671875, -0.031829833984375, -0.04083251953125, -0.0513916015625, -0.00...
timm/mobilenetv2_120d.ra_in1k
2023-04-27T21:14:23.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:1801.04381", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/mobilenetv2_120d.ra_in1k
0
2,003
timm
2022-12-13T00:00:42
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for mobilenetv2_120d.ra_in1k A MobileNet-v2 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below. Recipe details: * RandAugment `RA` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). * RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging * Step (exponential decay w/ staircase) LR schedule with warmup ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 5.8 - GMACs: 0.7 - Activations (M): 12.0 - Image size: 224 x 224 - **Papers:** - MobileNetV2: Inverted Residuals and Linear Bottlenecks: https://arxiv.org/abs/1801.04381 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('mobilenetv2_120d.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'mobilenetv2_120d.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 24, 112, 112]) # torch.Size([1, 32, 56, 56]) # torch.Size([1, 40, 28, 28]) # torch.Size([1, 112, 14, 14]) # torch.Size([1, 384, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'mobilenetv2_120d.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1280, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor 
``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{sandler2018mobilenetv2, title={Mobilenetv2: Inverted residuals and linear bottlenecks}, author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, pages={4510--4520}, year={2018} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
4,754
[ [ -0.0275421142578125, -0.0230255126953125, -0.01221466064453125, 0.0023899078369140625, -0.0265655517578125, -0.0263519287109375, -0.00649261474609375, -0.028717041015625, 0.0223236083984375, 0.035736083984375, -0.03167724609375, -0.04241943359375, -0.04663085937...
01-ai/Yi-6B
2023-11-06T14:40:13.000Z
[ "transformers", "pytorch", "safetensors", "Yi", "text-generation", "custom_code", "license:other", "region:us" ]
text-generation
01-ai
null
null
01-ai/Yi-6B
176
2,002
transformers
2023-11-01T11:14:36
--- license: other license_name: yi-license license_link: LICENSE --- <div align="center"> <img src="./Yi.svg" width="200px"> </div> ## Introduction The **Yi** series models are large language models trained from scratch by developers at [01.AI](https://01.ai/). The first public release contains two bilingual (English/Chinese) base models with the parameter sizes of 6B([`Yi-6B`](https://huggingface.co/01-ai/Yi-6B)) and 34B([`Yi-34B`](https://huggingface.co/01-ai/Yi-34B)). Both of them are trained with 4K sequence length and can be extended to 32K during inference time. The [`Yi-6B-200K`](https://huggingface.co/01-ai/Yi-6B-200K) and [`Yi-34B-200K`](https://huggingface.co/01-ai/Yi-34B-200K) are base models with 200K context length. ## News - 🎯 **2023/11/06**: The base model of [`Yi-6B-200K`](https://huggingface.co/01-ai/Yi-6B-200K) and [`Yi-34B-200K`](https://huggingface.co/01-ai/Yi-34B-200K) with 200K context length. - 🎯 **2023/11/02**: The base model of [`Yi-6B`](https://huggingface.co/01-ai/Yi-6B) and [`Yi-34B`](https://huggingface.co/01-ai/Yi-34B). ## Model Performance | Model | MMLU | CMMLU | C-Eval | GAOKAO | BBH | Common-sense Reasoning | Reading Comprehension | Math & Code | | :------------ | :------: | :------: | :------: | :------: | :------: | :--------------------: | :-------------------: | :---------: | | | 5-shot | 5-shot | 5-shot | 0-shot | 3-shot@1 | - | - | - | | LLaMA2-34B | 62.6 | - | - | - | 44.1 | 69.9 | 68.0 | 26.0 | | LLaMA2-70B | 68.9 | 53.3 | - | 49.8 | 51.2 | 71.9 | 69.4 | 36.8 | | Baichuan2-13B | 59.2 | 62.0 | 58.1 | 54.3 | 48.8 | 64.3 | 62.4 | 23.0 | | Qwen-14B | 66.3 | 71.0 | 72.1 | 62.5 | 53.4 | 73.3 | 72.5 | **39.8** | | Skywork-13B | 62.1 | 61.8 | 60.6 | 68.1 | 41.7 | 72.4 | 61.4 | 24.9 | | InternLM-20B | 62.1 | 59.0 | 58.8 | 45.5 | 52.5 | 78.3 | - | 30.4 | | Aquila-34B | 67.8 | 71.4 | 63.1 | - | - | - | - | - | | Falcon-180B | 70.4 | 58.0 | 57.8 | 59.0 | 54.0 | 77.3 | 68.8 | 34.0 | | Yi-6B | 63.2 | 75.5 | 72.0 | 72.2 | 42.8 | 72.3 | 68.7 | 19.8 | | Yi-6B-200K | 64.0 | 75.3 | 73.5 | 73.9 | 42.0 | 72.0 | 69.1 | 19.0 | | **Yi-34B** | **76.3** | **83.7** | 81.4 | 82.8 | **54.3** | **80.1** | 76.4 | 37.1 | | Yi-34B-200K | 76.1 | 83.6 | **81.9** | **83.4** | 52.7 | 79.7 | **76.6** | 36.3 | While benchmarking open-source models, we have observed a disparity between the results generated by our pipeline and those reported in public sources (e.g. OpenCompass). Upon conducting a more in-depth investigation of this difference, we have discovered that various models may employ different prompts, post-processing strategies, and sampling techniques, potentially resulting in significant variations in the outcomes. Our prompt and post-processing strategy remains consistent with the original benchmark, and greedy decoding is employed during evaluation without any post-processing for the generated content. For scores that were not reported by the original authors (including scores reported with different settings), we try to get results with our pipeline. To evaluate the model's capability extensively, we adopted the methodology outlined in Llama2. Specifically, we included PIQA, SIQA, HellaSwag, WinoGrande, ARC, OBQA, and CSQA to assess common sense reasoning. SquAD, QuAC, and BoolQ were incorporated to evaluate reading comprehension. CSQA was exclusively tested using a 7-shot setup, while all other tests were conducted with a 0-shot configuration. 
Additionally, we introduced GSM8K (8-shot@1), MATH (4-shot@1), HumanEval (0-shot@1), and MBPP (3-shot@1) under the category "Math & Code". Due to technical constraints, we did not test Falcon-180B on QuAC and OBQA; the score is derived by averaging the scores on the remaining tasks. Since the scores for these two tasks are generally lower than the average, we believe that Falcon-180B's performance was not underestimated. ## Usage Please visit our [github repository](https://github.com/01-ai/Yi) for general guidance on how to use this model. ## Disclaimer Although we use data compliance checking algorithms during the training process to ensure the compliance of the trained model to the best of our ability, due to the complexity of the data and the diversity of language model usage scenarios, we cannot guarantee that the model will generate correct and reasonable output in all scenarios. Please be aware that there is still a risk of the model producing problematic outputs. We will not be responsible for any risks and issues resulting from misuse, misguidance, illegal usage, and related misinformation, as well as any associated data security concerns. ## License The Yi series models are fully open for academic research and free commercial usage with permission via applications. All usage must adhere to the [Model License Agreement 2.0](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE). To apply for the official commercial license, please contact us ([yi@01.ai](mailto:yi@01.ai)).
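The card defers usage details to the GitHub repository. For reference, a minimal generation sketch with `transformers`: the repo's `custom_code` tag means `trust_remote_code=True` is required, and `device_map="auto"` additionally assumes `accelerate` is installed (the prompt is illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-6B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-6B",
    device_map="auto",
    torch_dtype="auto",
    trust_remote_code=True,
)

inputs = tokenizer("There is a place where", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)  # greedy decoding by default
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```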
5,823
[ [ -0.0367431640625, -0.044891357421875, 0.014617919921875, 0.0123443603515625, -0.0126800537109375, -0.00449371337890625, -0.00147247314453125, -0.041656494140625, 0.0110321044921875, 0.036956787109375, -0.052642822265625, -0.045318603515625, -0.04595947265625, ...
rinna/japanese-gpt2-xsmall
2023-06-06T02:42:01.000Z
[ "transformers", "pytorch", "tf", "safetensors", "gpt2", "text-generation", "ja", "japanese", "lm", "nlp", "dataset:cc100", "dataset:wikipedia", "license:mit", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
rinna
null
null
rinna/japanese-gpt2-xsmall
12
2,000
transformers
2022-03-02T23:29:05
--- language: ja thumbnail: https://github.com/rinnakk/japanese-gpt2/blob/master/rinna.png tags: - ja - japanese - gpt2 - text-generation - lm - nlp license: mit datasets: - cc100 - wikipedia widget: - text: "生命、宇宙、そして万物についての究極の疑問の答えは" --- # japanese-gpt2-xsmall ![rinna-icon](./rinna.png) This repository provides an extra-small-sized Japanese GPT-2 model. The model was trained using code from the Github repository [rinnakk/japanese-pretrained-models](https://github.com/rinnakk/japanese-pretrained-models) by [rinna Co., Ltd.](https://corp.rinna.co.jp/) # How to use the model ~~~~ from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-xsmall", use_fast=False) tokenizer.do_lower_case = True # due to a bug in tokenizer config loading model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-xsmall") ~~~~ # Model architecture A 6-layer, 512-hidden-size transformer-based language model. # Training The model was trained on [Japanese CC-100](http://data.statmt.org/cc-100/ja.txt.xz) and [Japanese Wikipedia](https://dumps.wikimedia.org/other/cirrussearch) to optimize a traditional language modelling objective on 8\\*V100 GPUs for around 4 days. It reaches around 28 perplexity on a chosen validation set from CC-100. # Tokenization The model uses a [sentencepiece](https://github.com/google/sentencepiece)-based tokenizer; the vocabulary was trained on the Japanese Wikipedia using the official sentencepiece training script. # License [The MIT license](https://opensource.org/licenses/MIT)
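The snippet above loads the tokenizer and model but stops short of generation. A minimal continuation using the card's widget prompt; the sampling settings are illustrative:

```python
import torch

input_ids = tokenizer.encode(
    "生命、宇宙、そして万物についての究極の疑問の答えは", return_tensors="pt"
)
with torch.no_grad():
    output = model.generate(
        input_ids,
        max_length=50,
        do_sample=True,
        top_p=0.95,
        pad_token_id=tokenizer.pad_token_id,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))
```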
1,588
[ [ -0.027740478515625, -0.042938232421875, 0.0271148681640625, 0.0156402587890625, -0.03778076171875, -0.01026153564453125, -0.01525115966796875, -0.0178680419921875, -0.0013780593872070312, 0.032012939453125, -0.045684814453125, -0.01239013671875, -0.0452575683593...
maywell/Synatra-11B-Testbench-2
2023-10-16T01:21:09.000Z
[ "transformers", "pytorch", "mistral", "text-generation", "ko", "license:cc-by-nc-4.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
maywell
null
null
maywell/Synatra-11B-Testbench-2
0
2,000
transformers
2023-10-15T23:42:55
--- language: - ko library_name: transformers pipeline_tag: text-generation license: cc-by-nc-4.0 --- # **Synatra-11B-Testbench-2** Made by StableFluffy **Contact (Do not contact for personal things.)** Discord : is.maywell Telegram : AlzarTakkarsen ## License This model is strictly [*non-commercial*](https://creativecommons.org/licenses/by-nc/4.0/) (**cc-by-nc-4.0**) use only, which takes priority over the **MISTRAL APACHE 2.0**. The "Model" is completely free (i.e. base model, derivatives, merges/mixes) to use for non-commercial purposes as long as the included **cc-by-nc-4.0** license in any parent repository, and the non-commercial use statute, remain, regardless of other models' licences. The licence can be changed after a new model is released. If you want to use this model for commercial purposes, contact me. ## Model Details **Base Model** [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) **Trained On** A100 80GB * 4 # **Model Benchmark** X > Readme format: [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) ---
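The card lists no usage snippet. A minimal loading sketch, assuming a recent `transformers` with Mistral support and `accelerate` for `device_map="auto"`; the Korean prompt is illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "maywell/Synatra-11B-Testbench-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

inputs = tokenizer("안녕하세요, 자기소개를 해주세요.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```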
1,118
[ [ -0.029754638671875, -0.05279541015625, 0.004467010498046875, 0.032440185546875, -0.0523681640625, -0.0268402099609375, -0.0026645660400390625, -0.06085205078125, 0.0250396728515625, 0.0279998779296875, -0.058624267578125, -0.0303192138671875, -0.042938232421875,...
agonh/sdxl
2023-10-02T05:08:21.000Z
[ "diffusers", "onnx", "safetensors", "openvino", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "stable-diffusion", "text-to-image", "en", "license:openrail++", "endpoints_compatible", "has_space", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
agonh
null
null
agonh/sdxl
3
1,999
diffusers
2023-10-02T04:25:16
--- license: openrail++ base_model: diffusers/stable-diffusion-xl-base-1.0 tags: - stable-diffusion-xl - stable-diffusion-xl-diffusers - stable-diffusion - text-to-image - diffusers inference: true language: - en --- # SDXL-BASE - Model creator: [FFusion](https://huggingface.co/FFusion) - Original model: [FFusionXL-BASE](https://huggingface.co/FFusion/FFusionXL-BASE) ## Description This repo contains files for [FFusion's FFusionXL-BASE](https://huggingface.co/FFusion/FFusionXL-BASE).
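The description points at the original FFusionXL-BASE card for details but gives no inference snippet. A minimal text-to-image sketch with `diffusers`, assuming a CUDA GPU; the prompt is illustrative:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained("agonh/sdxl", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe(prompt="a photo of an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")
```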
498
[ [ -0.0302886962890625, -0.0281524658203125, 0.052581787109375, 0.053131103515625, -0.019134521484375, -0.0159759521484375, 0.0165863037109375, 0.00023746490478515625, -0.003391265869140625, 0.07025146484375, -0.05047607421875, -0.05413818359375, -0.05609130859375,...
NoCrypt/SomethingV2
2023-04-25T13:32:01.000Z
[ "diffusers", "stable-diffusion", "text-to-image", "safetensors", "en", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
NoCrypt
null
null
NoCrypt/SomethingV2
91
1,996
diffusers
2023-02-18T16:59:05
--- license: creativeml-openrail-m thumbnail: >- https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00031-1769428138-masterpiece%2C%20best%20quality%2C%20hatsune%20miku%2C%201girl%2C%20white%20shirt%2C%20blue%20necktie%2C%20bare%20shoulders%2C%20very%20detailed%20background%2C%20hands%20on%20ow.png tags: - stable-diffusion - text-to-image - safetensors - diffusers inference: true language: - en widget: - text: >- masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden example_title: example 1girl - text: >- masterpiece, best quality, 1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden example_title: example 1boy library_name: diffusers --- ## Introducing SomethingV2.2, An updated version of this model, can be found [here](https://huggingface.co/NoCrypt/SomethingV2_2) --- [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/NoCrypt/SomethingV2) <center><img src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/banner.webp" width="95%"/></center> <center><h1><b>SomethingV2</b></h1></center> <p align="center">Welcome to SomethingV2 - an anime latent diffusion model. This model is intended to produce vibrant but soft anime style images. </p> ## Recommended Settings - VAE: None (Baked in model) - Clip Skip: 2 - Sampler: DPM++ 2M Karras - CFG Scale: 7 - 12 - Negative Prompt: [EasyNegative](https://huggingface.co/datasets/gsdf/EasyNegative) - For better results, using hires fix is a must. 
- Hires upscaler: Latent (any variant, such as nearest-exact) - Resolution: At least 512x512 first pass, upscale up to 1500x1500 ## Example <img style="display:inline;margin:0;padding:0;" src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00090-1829045217-masterpiece%20best%20quality%20hatsune%20miku%201girl%20white%20shirt%20blue%20necktie%20bare%20shoulders%20very%20detailed%20background%20hands%20on%20ow2473e4832c888be11494dab007c390c19c5b2f7d.png" width="32%"/> <img style="display:inline;margin:0;padding:0;" src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00022-1769428138-masterpiece%2C%20best%20quality%2C%20hatsune%20miku%2C%201girl%2C%20white%20shirt%2C%20blue%20necktie%2C%20bare%20shoulders%2C%20very%20detailed%20background%2C%20hands%20on%20ow.png" width="32%"/> <img style="display:inline;margin:0;padding:0;" src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00098-3514023396-masterpiece%2C%20best%20quality%2C%20hatsune%20miku%2C%201girl%2C%20white%20shirt%2C%20blue%20necktie%2C%20bare%20shoulders%2C%20very%20detailed%20background%2C%20cafe%2C%20angry.png" width="32%"/> <details><summary><big><b>Prompts</b></big></summary> ```yaml masterpiece, best quality, hatsune miku, 1girl, white shirt, blue necktie, bare shoulders, very detailed background, hands on own cheeks, open mouth, one eye closed, clenched teeth, smile Negative prompt: EasyNegative, tattoo, (shoulder tattoo:1.0), (number tattoo:1.3), frills Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1829045217, Size: 456x592, Model: somethingv2_1, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 12, Hires upscaler: Latent (nearest-exact), Discard penultimate sigma: True ``` ```yaml masterpiece, best quality, hatsune miku, 1girl, white shirt, blue necktie, bare shoulders, very detailed background, hands on own cheeks, open mouth, eyez closed, clenched teeth, smile, arms behind back, Negative prompt: EasyNegative, tattoo, (shoulder tattoo:1.0), (number tattoo:1.3), frills Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1769428138, Size: 456x592, Model: somethingv2_1, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 12, Hires upscaler: Latent (nearest-exact), Discard penultimate sigma: True ``` ```yaml masterpiece, best quality, hatsune miku, 1girl, white shirt, blue necktie, bare shoulders, very detailed background, cafe, angry, crossed arms, detached sleeves, light particles, Negative prompt: EasyNegative, tattoo, (shoulder tattoo:1.0), (number tattoo:1.3), frills Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3514023396, Size: 456x592, Model: somethingv2_1, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 12, Hires upscaler: Latent (nearest-exact), Discard penultimate sigma: True ``` </details> ## FAQ ### Model differences? 
![](https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/xyz_grid-0003-4163886333-masterpiece%2C%20hatsune%20miku%2C%20white%20shirt%2C%20blue%20necktie%2C%20bare%20shoulders%2C%20detached%20sleeves%2C.png) <details><summary><big><b>Prompts</b></big></summary> ```yaml masterpiece, hatsune miku, white shirt, blue necktie, bare shoulders, detached sleeves, Negative prompt: EasyNegative Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 4163886333, Size: 440x592, Model: -, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 13, Hires upscaler: Latent (nearest-exact) ``` </details> ### Why are all examples miku? Because I love miku. But here are other subjects: <img style="display:inline;margin:0;padding:0;" src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00018-4018636341-masterpiece%2C%20best%20quality%2C%201girl%2C%20aqua%20eyes%2C%20baseball%20cap%2C%20blonde%20hair%2C%20closed%20mouth%2C%20earrings%2C%20green%20background%2C%20hat%2C%20hoop%20earr.png" width="49%"/> <img style="display:inline;margin:0;padding:0;" src="https://huggingface.co/NoCrypt/SomethingV2/resolve/main/imgs/00019-1334620477-masterpiece%2C%20best%20quality%2C%20landscape.png" width="49%"/> <details><summary><big><b>Prompts</b></big></summary> ```yaml masterpiece, best quality, 1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt Negative prompt: EasyNegative Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 4018636341, Size: 440x592, Model: somethingv2, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 13, Hires upscaler: Latent (nearest-exact) ``` ```yaml masterpiece, best quality, landscape Negative prompt: EasyNegative Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1334620477, Size: 440x592, Model: somethingv2, Denoising strength: 0.53, Clip skip: 2, ENSD: 31337, Hires upscale: 1.65, Hires steps: 13, Hires upscaler: Latent (nearest-exact) ``` </details>
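For `diffusers` users, a minimal sketch approximating the recommended settings above: DPM++ 2M Karras maps to `DPMSolverMultistepScheduler` with Karras sigmas, while clip skip and the hires-fix pass are web-UI features with no direct one-line equivalent here. The EasyNegative embedding must be loaded separately, so the negative prompt below is only a stand-in:

```python
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "NoCrypt/SomethingV2", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True  # DPM++ 2M Karras equivalent
)

image = pipe(
    "masterpiece, best quality, 1girl, brown hair, green eyes, colorful, autumn",
    negative_prompt="lowres, bad anatomy",  # stand-in; load EasyNegative for best results
    guidance_scale=7.0,
    num_inference_steps=20,
).images[0]
image.save("somethingv2.png")
```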
6,948
[ [ -0.05859375, -0.06048583984375, 0.01456451416015625, 0.0117340087890625, -0.038299560546875, -0.00885772705078125, 0.0025634765625, -0.038055419921875, 0.054962158203125, 0.037261962890625, -0.06500244140625, -0.039703369140625, -0.048065185546875, 0.0093307...
badmonk/aoikax
2023-07-21T06:42:29.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
badmonk
null
null
badmonk/aoikax
1
1,995
diffusers
2023-07-16T08:54:14
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- # Model Card for AOIKAX ## Model Description - **Developed by:** BADMONK - **Model type:** Dreambooth Model + Extracted LoRA - **Language(s) (NLP):** EN - **License:** Creativeml-Openrail-M - **Parent Model:** aznchikmajicmix # How to Get Started with the Model Use the code below to get started with the model. ### AOIKAX ###
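"Use the code below" above is not followed by any code. A minimal sketch, assuming a standard `diffusers` checkpoint and that `AOIKAX` is the DreamBooth trigger token (the heading suggests it, but the card does not say):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "badmonk/aoikax", torch_dtype=torch.float16
).to("cuda")
image = pipe("photo of AOIKAX, portrait, highly detailed").images[0]  # AOIKAX assumed trigger
image.save("aoikax.png")
```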
433
[ [ -0.018524169921875, -0.03375244140625, 0.01419830322265625, 0.00926971435546875, -0.07171630859375, -0.00736236572265625, 0.042266845703125, -0.027923583984375, 0.04107666015625, 0.058837890625, -0.049285888671875, -0.055511474609375, -0.045989990234375, -0....
timm/convnext_tiny.in12k_ft_in1k_384
2023-03-31T22:39:35.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-12k", "arxiv:2201.03545", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/convnext_tiny.in12k_ft_in1k_384
0
1,994
timm
2023-01-18T20:12:06
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-12k --- # Model card for convnext_tiny.in12k_ft_in1k_384 A ConvNeXt image classification model. Pretrained in `timm` on ImageNet-12k (an 11821-class subset of the full ImageNet-22k) and fine-tuned on ImageNet-1k by Ross Wightman. ImageNet-12k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program. Fine-tuning performed on 8x GPU [Lambda Labs](https://lambdalabs.com/) cloud instances. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 28.6 - GMACs: 13.1 - Activations (M): 39.5 - Image size: 384 x 384 - **Papers:** - A ConvNet for the 2020s: https://arxiv.org/abs/2201.03545 - **Original:** https://github.com/huggingface/pytorch-image-models - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-12k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('convnext_tiny.in12k_ft_in1k_384', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_tiny.in12k_ft_in1k_384', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 96, 96, 96]) # torch.Size([1, 192, 48, 48]) # torch.Size([1, 384, 24, 24]) # torch.Size([1, 768, 12, 12]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_tiny.in12k_ft_in1k_384', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 768, 12, 12) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model 
results](https://github.com/huggingface/pytorch-image-models/tree/main/results). All timing numbers from eager model PyTorch 1.13 on RTX 3090 w/ AMP. | model |top1 |top5 |img_size|param_count|gmacs |macts |samples_per_sec|batch_size| |------------------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------|---------------|----------| | [convnextv2_huge.fcmae_ft_in22k_in1k_512](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_512) |88.848|98.742|512 |660.29 |600.81|413.07|28.58 |48 | | [convnextv2_huge.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_384) |88.668|98.738|384 |660.29 |337.96|232.35|50.56 |64 | | [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45|122.45 |256 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384) |88.312|98.578|384 |200.13 |101.11|126.74|196.84 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k_384) |88.196|98.532|384 |197.96 |101.1 |126.74|128.94 |128 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320) |87.968|98.47 |320 |200.13 |70.21 |88.02 |283.42 |256 | | [convnext_xlarge.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k_384) |87.75 |98.556|384 |350.2 |179.2 |168.99|124.85 |192 | | [convnextv2_base.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k_384) |87.646|98.422|384 |88.72 |45.21 |84.49 |209.51 |256 | | [convnext_large.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k_384) |87.476|98.382|384 |197.77 |101.1 |126.74|194.66 |256 | | [convnext_large_mlp.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_augreg_ft_in1k) |87.344|98.218|256 |200.13 |44.94 |56.33 |438.08 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k) |87.26 |98.248|224 |197.96 |34.4 |43.13 |376.84 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384) |87.138|98.212|384 |88.59 |45.21 |84.49 |365.47 |256 | | [convnext_xlarge.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k) |87.002|98.208|224 |350.2 |60.98 |57.5 |368.01 |256 | | [convnext_base.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k_384) |86.796|98.264|384 |88.59 |45.21 |84.49 |366.54 |256 | | [convnextv2_base.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k) |86.74 |98.022|224 |88.72 |15.38 |28.75 |624.23 |256 | | [convnext_large.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k) |86.636|98.028|224 |197.77 |34.4 |43.13 |581.43 |256 | | [convnext_base.clip_laiona_augreg_ft_in1k_384](https://huggingface.co/timm/convnext_base.clip_laiona_augreg_ft_in1k_384) |86.504|97.97 |384 |88.59 |45.21 |84.49 |368.14 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k) |86.344|97.97 |256 |88.59 |20.09 |37.55 |816.14 |256 | | 
[convnextv2_huge.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in1k) |86.256|97.75 |224 |660.29 |115.0 |79.07 |154.72 |256 | | [convnext_small.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_small.in12k_ft_in1k_384) |86.182|97.92 |384 |50.22 |25.58 |63.37 |516.19 |256 | | [convnext_base.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in1k) |86.154|97.68 |256 |88.59 |20.09 |37.55 |819.86 |256 | | [convnext_base.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k) |85.822|97.866|224 |88.59 |15.38 |28.75 |1037.66 |256 | | [convnext_small.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k_384) |85.778|97.886|384 |50.22 |25.58 |63.37 |518.95 |256 | | [convnextv2_large.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in1k) |85.742|97.584|224 |197.96 |34.4 |43.13 |375.23 |256 | | [convnext_small.in12k_ft_in1k](https://huggingface.co/timm/convnext_small.in12k_ft_in1k) |85.174|97.506|224 |50.22 |8.71 |21.56 |1474.31 |256 | | [convnext_tiny.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k_384) |85.118|97.608|384 |28.59 |13.14 |39.48 |856.76 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k_384) |85.112|97.63 |384 |28.64 |13.14 |39.48 |491.32 |256 | | [convnextv2_base.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in1k) |84.874|97.09 |224 |88.72 |15.38 |28.75 |625.33 |256 | | [convnext_small.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k) |84.562|97.394|224 |50.22 |8.71 |21.56 |1478.29 |256 | | [convnext_large.fb_in1k](https://huggingface.co/timm/convnext_large.fb_in1k) |84.282|96.892|224 |197.77 |34.4 |43.13 |584.28 |256 | | [convnext_tiny.in12k_ft_in1k](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k) |84.186|97.124|224 |28.59 |4.47 |13.44 |2433.7 |256 | | [convnext_tiny.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k_384) |84.084|97.14 |384 |28.59 |13.14 |39.48 |862.95 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k) |83.894|96.964|224 |28.64 |4.47 |13.44 |1452.72 |256 | | [convnext_base.fb_in1k](https://huggingface.co/timm/convnext_base.fb_in1k) |83.82 |96.746|224 |88.59 |15.38 |28.75 |1054.0 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k_384) |83.37 |96.742|384 |15.62 |7.22 |24.61 |801.72 |256 | | [convnext_small.fb_in1k](https://huggingface.co/timm/convnext_small.fb_in1k) |83.142|96.434|224 |50.22 |8.71 |21.56 |1464.0 |256 | | [convnextv2_tiny.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in1k) |82.92 |96.284|224 |28.64 |4.47 |13.44 |1425.62 |256 | | [convnext_tiny.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k) |82.898|96.616|224 |28.59 |4.47 |13.44 |2480.88 |256 | | [convnext_nano.in12k_ft_in1k](https://huggingface.co/timm/convnext_nano.in12k_ft_in1k) |82.282|96.344|224 |15.59 |2.46 |8.37 |3926.52 |256 | | [convnext_tiny_hnf.a2h_in1k](https://huggingface.co/timm/convnext_tiny_hnf.a2h_in1k) |82.216|95.852|224 |28.59 |4.47 |13.44 |2529.75 |256 | | [convnext_tiny.fb_in1k](https://huggingface.co/timm/convnext_tiny.fb_in1k) |82.066|95.854|224 |28.59 |4.47 |13.44 |2346.26 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k) |82.03 
|96.166|224 |15.62 |2.46 |8.37 |2300.18 |256 | | [convnextv2_nano.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in1k) |81.83 |95.738|224 |15.62 |2.46 |8.37 |2321.48 |256 | | [convnext_nano_ols.d1h_in1k](https://huggingface.co/timm/convnext_nano_ols.d1h_in1k) |80.866|95.246|224 |15.65 |2.65 |9.38 |3523.85 |256 | | [convnext_nano.d1h_in1k](https://huggingface.co/timm/convnext_nano.d1h_in1k) |80.768|95.334|224 |15.59 |2.46 |8.37 |3915.58 |256 | | [convnextv2_pico.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_pico.fcmae_ft_in1k) |80.304|95.072|224 |9.07 |1.37 |6.1 |3274.57 |256 | | [convnext_pico.d1_in1k](https://huggingface.co/timm/convnext_pico.d1_in1k) |79.526|94.558|224 |9.05 |1.37 |6.1 |5686.88 |256 | | [convnext_pico_ols.d1_in1k](https://huggingface.co/timm/convnext_pico_ols.d1_in1k) |79.522|94.692|224 |9.06 |1.43 |6.5 |5422.46 |256 | | [convnextv2_femto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_femto.fcmae_ft_in1k) |78.488|93.98 |224 |5.23 |0.79 |4.57 |4264.2 |256 | | [convnext_femto_ols.d1_in1k](https://huggingface.co/timm/convnext_femto_ols.d1_in1k) |77.86 |93.83 |224 |5.23 |0.82 |4.87 |6910.6 |256 | | [convnext_femto.d1_in1k](https://huggingface.co/timm/convnext_femto.d1_in1k) |77.454|93.68 |224 |5.22 |0.79 |4.57 |7189.92 |256 | | [convnextv2_atto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_atto.fcmae_ft_in1k) |76.664|93.044|224 |3.71 |0.55 |3.81 |4728.91 |256 | | [convnext_atto_ols.a2_in1k](https://huggingface.co/timm/convnext_atto_ols.a2_in1k) |75.88 |92.846|224 |3.7 |0.58 |4.11 |7963.16 |256 | | [convnext_atto.d2_in1k](https://huggingface.co/timm/convnext_atto.d2_in1k) |75.664|92.9 |224 |3.7 |0.55 |3.81 |8439.22 |256 | ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } ```
15,985
[ [ -0.06732177734375, -0.03271484375, -0.0022068023681640625, 0.03399658203125, -0.0318603515625, -0.01513671875, -0.013092041015625, -0.0364990234375, 0.063232421875, 0.0166473388671875, -0.044403076171875, -0.04132080078125, -0.051239013671875, -0.00303459167...
timm/maxxvit_rmlp_nano_rw_256.sw_in1k
2023-05-11T00:45:50.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2204.01697", "arxiv:2201.03545", "arxiv:2111.09883", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/maxxvit_rmlp_nano_rw_256.sw_in1k
0
1,993
timm
2023-01-20T21:36:34
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for maxxvit_rmlp_nano_rw_256.sw_in1k A timm specific MaxxViT (w/ a MLP Log-CPB (continuous log-coordinate relative position bias motivated by Swin-V2)) image classification model. Trained in `timm` on ImageNet-1k by Ross Wightman. ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing an MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention leaving only ConvNeXt blocks and grid attention w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Model names containing the string `rw` are `timm` specific configs w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models so there are variations. All models with the string `tf` are models exactly matching Tensorflow based models by the original paper authors with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 16.8 - GMACs: 4.4 - Activations (M): 26.0 - Image size: 256 x 256 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - A ConvNet for the 2020s: https://arxiv.org/abs/2201.03545 - Swin Transformer V2: Scaling Up Capacity and Resolution: https://arxiv.org/abs/2111.09883 - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxxvit_rmlp_nano_rw_256.sw_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxxvit_rmlp_nano_rw_256.sw_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 128, 128]) # torch.Size([1, 64, 64, 64]) # torch.Size([1, 128, 32, 32]) # torch.Size([1, 256, 16, 16]) # torch.Size([1, 512, 8, 8]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxxvit_rmlp_nano_rw_256.sw_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 8, 8) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| 
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| 
|[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| 
|[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| 
|[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
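The samples / sec column in the tables above is strongly hardware- and precision-dependent. As a rough sketch only, a figure of that kind might be measured with `timm` as below; the model choice, batch size, iteration counts, and AMP settings here are assumptions, not the exact benchmark configuration, so absolute numbers will differ.

```python
import time
import timm
import torch

# Illustrative throughput sketch only; model, batch size, and AMP use are
# assumptions here, so absolute numbers will differ from the tables above.
model = timm.create_model('maxvit_nano_rw_256.sw_in1k', pretrained=True).cuda().eval()
batch = torch.randn(32, 3, 256, 256, device='cuda')

with torch.inference_mode(), torch.autocast('cuda'):
    for _ in range(5):   # warmup iterations
        model(batch)
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(20):  # timed iterations
        model(batch)
    torch.cuda.synchronize()
    elapsed = time.time() - start

print(f'samples / sec: {32 * 20 / elapsed:.1f}')
```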
22,383
[ [ -0.052947998046875, -0.03173828125, 0.0024700164794921875, 0.0283355712890625, -0.023345947265625, -0.0183868408203125, -0.01041412353515625, -0.0241546630859375, 0.0501708984375, 0.0164031982421875, -0.042083740234375, -0.04736328125, -0.04681396484375, -0....
Yntec/Trending
2023-10-15T10:17:02.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "Ciro_Negrogni", "MagicArt35", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Yntec
null
null
Yntec/Trending
2
1,993
diffusers
2023-09-17T03:50:21
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image tags: - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image - Ciro_Negrogni - MagicArt35 --- # Trending AmovieX by MagicArt35 with the Photographic Trend LoRA by Ciro_Negrogni baked in. Third version of three with Photographic Trend's compositions. First version: https://huggingface.co/Yntec/aPhotographicTrend Second version with AmovieX's compositions: https://huggingface.co/Yntec/aMovieTrend Sample and prompts: ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/Kd2mBcYGk3MQBX1f48uOd.png) Pretty Cute Girl Photorealistic, highly detailed, masterpiece, trending on ArtStation, sitting, Detailed Chibi Eyes, fantasy, beautiful detailed legs, streetwear, gorgeous detailed hair, hat, Magazine ad, iconic, 1943, from the movie, sharp focus. ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/k2czKN2rOTVdeKtzhehdt.png) Cartoon CUTE LITTLE baby, CHIBI, gorgeous detailed hair, looking, cute socks, holding pillow, skirt, Magazine ad, iconic, 1940, sharp focus. pencil art By KlaysMoji and Clay Mann and leyendecker and Dave Rapoza. Original pages: https://civitai.com/models/98543 (Photographic Trend) https://civitai.com/models/94687/photo-movie-x (AmovieX) # Recipe - Merge Photographic Trend LoRA to checkpoint 1.0 Model A: AmovieX OutPut: PhotographicTrendAmovieX - SuperMerger Weight sum Train Difference use MBW 0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1 Model A: PhotographicTrendAmovieX Model B: AmovieX OutPut: Trending
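As a rough illustration of what the recipe's merge-block-weighted (MBW) step does, a plain per-block weighted sum might look like the sketch below. This is not SuperMerger's actual code: the file names are placeholders, the block-to-alpha mapping is an assumption, and SuperMerger's "Train Difference" mode does more than a simple weighted sum.

```python
import torch

# Illustrative MBW-style weighted sum only; not SuperMerger's implementation.
# 26 alphas assumed to mean: base, IN00-IN11, M00, OUT00-OUT11 (recipe values).
mbw = [0] * 13 + [1] * 13

a = torch.load('PhotographicTrendAmovieX.ckpt', map_location='cpu')['state_dict']  # Model A (placeholder path)
b = torch.load('AmovieX.ckpt', map_location='cpu')['state_dict']                   # Model B (placeholder path)

def block_index(key: str) -> int:
    # Map a UNet parameter name onto one of the 26 assumed MBW slots.
    if 'input_blocks.' in key:
        return 1 + int(key.split('input_blocks.')[1].split('.')[0])
    if 'middle_block.' in key:
        return 13
    if 'output_blocks.' in key:
        return 14 + int(key.split('output_blocks.')[1].split('.')[0])
    return 0  # everything else falls back to the base alpha

merged = {}
for key, tensor_a in a.items():
    alpha = mbw[block_index(key)]
    merged[key] = (1 - alpha) * tensor_a + alpha * b[key]

torch.save({'state_dict': merged}, 'Trending.ckpt')
```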
1,654
[ [ -0.019927978515625, -0.04730224609375, 0.00927734375, 0.033905029296875, -0.023956298828125, -0.00044846534729003906, 0.032318115234375, -0.0380859375, 0.0869140625, 0.038482666015625, -0.0589599609375, -0.049957275390625, -0.04132080078125, -0.0211639404296...
malteos/PubMedNCL
2023-10-12T09:52:20.000Z
[ "transformers", "pytorch", "safetensors", "bert", "feature-extraction", "en", "arxiv:2202.06671", "arxiv:2007.15779", "license:mit", "endpoints_compatible", "region:us" ]
feature-extraction
malteos
null
null
malteos/PubMedNCL
2
1,989
transformers
2023-04-15T07:56:21
--- license: mit tags: - feature-extraction language: en --- # PubMedNCL A pretrained language model for document representations of biomedical papers. PubMedNCL is based on [PubMedBERT](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext), which is a BERT model pretrained on abstracts and full-texts from PubMedCentral, and fine-tuned via citation neighborhood contrastive learning, as introduced by [SciNCL](https://huggingface.co/malteos/scincl). ## How to use the pretrained model ```python from transformers import AutoTokenizer, AutoModel # load model and tokenizer tokenizer = AutoTokenizer.from_pretrained('malteos/PubMedNCL') model = AutoModel.from_pretrained('malteos/PubMedNCL') papers = [{'title': 'BERT', 'abstract': 'We introduce a new language representation model called BERT'}, {'title': 'Attention is all you need', 'abstract': ' The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'}] # concatenate title and abstract with [SEP] token title_abs = [d['title'] + tokenizer.sep_token + (d.get('abstract') or '') for d in papers] # preprocess the input inputs = tokenizer(title_abs, padding=True, truncation=True, return_tensors="pt", max_length=512) # inference result = model(**inputs) # take the first token ([CLS] token) in the batch as the embedding embeddings = result.last_hidden_state[:, 0, :] ``` ## Citation - [Neighborhood Contrastive Learning for Scientific Document Representations with Citation Embeddings (EMNLP 2022 paper)](https://arxiv.org/abs/2202.06671). - [Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing](https://arxiv.org/abs/2007.15779). ## License MIT
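Continuing the snippet above, the `[CLS]` embeddings can be compared directly, for example with cosine similarity; this short follow-up is illustrative and assumes `embeddings` from the previous block (shape `[2, hidden_size]`).

```python
import torch.nn.functional as F

# Cosine similarity between the two paper embeddings from the snippet above.
sim = F.cosine_similarity(embeddings[0], embeddings[1], dim=0)
print(f"similarity: {sim.item():.3f}")
```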
1,745
[ [ -0.0018110275268554688, -0.03277587890625, 0.04656982421875, 0.01654052734375, -0.037109375, 0.0124053955078125, -0.004802703857421875, -0.01386260986328125, 0.02130126953125, 0.01387786865234375, -0.0211181640625, -0.051971435546875, -0.0618896484375, 0.022...
espnet/kan-bayashi_ljspeech_vits
2021-10-23T20:27:43.000Z
[ "espnet", "audio", "text-to-speech", "en", "dataset:ljspeech", "arxiv:1804.00015", "license:cc-by-4.0", "has_space", "region:us" ]
text-to-speech
espnet
null
null
espnet/kan-bayashi_ljspeech_vits
170
1,988
espnet
2022-03-02T23:29:05
--- tags: - espnet - audio - text-to-speech language: en datasets: - ljspeech license: cc-by-4.0 --- ## ESPnet2 TTS pretrained model ### `kan-bayashi/ljspeech_vits` ♻️ Imported from https://zenodo.org/record/5443814/ This model was trained by kan-bayashi using ljspeech/tts1 recipe in [espnet](https://github.com/espnet/espnet/). ### Demo: How to use in ESPnet2 ```python # coming soon ``` ### Citing ESPnet ```BibTex @inproceedings{watanabe2018espnet, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson {Enrique Yalta Soplin} and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, title={{ESPnet}: End-to-End Speech Processing Toolkit}, year={2018}, booktitle={Proceedings of Interspeech}, pages={2207--2211}, doi={10.21437/Interspeech.2018-1456}, url={http://dx.doi.org/10.21437/Interspeech.2018-1456} } @inproceedings{hayashi2020espnet, title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit}, author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu}, booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, pages={7654--7658}, year={2020}, organization={IEEE} } ``` or arXiv: ```bibtex @misc{watanabe2018espnet, title={ESPnet: End-to-End Speech Processing Toolkit}, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Enrique Yalta Soplin and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, year={2018}, eprint={1804.00015}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
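While the official snippet above is still marked "coming soon", a minimal sketch assuming the standard ESPnet2 `Text2Speech` inference API would look like the following; resolving the Hugging Face tag through the model-zoo downloader and the `soundfile` dependency are assumptions here.

```python
import soundfile as sf
from espnet2.bin.tts_inference import Text2Speech

# Load the pretrained VITS model by its tag (assumed to resolve via the
# espnet_model_zoo downloader).
tts = Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits")

out = tts("Hello, this is a test of the LJSpeech VITS model.")
sf.write("out.wav", out["wav"].numpy(), tts.fs)
```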
1,901
[ [ -0.0291290283203125, -0.03448486328125, 0.00437164306640625, 0.0163421630859375, -0.0288543701171875, -0.00011420249938964844, -0.0277252197265625, -0.04443359375, 0.0164794921875, 0.00891876220703125, -0.049224853515625, -0.032196044921875, -0.045562744140625, ...
badmonk/egross
2023-07-13T02:22:49.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
badmonk
null
null
badmonk/egross
1
1,987
diffusers
2023-07-12T03:46:47
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- # Model Card for EGROSS ## Model Description - **Developed by:** BADMONK - **Model type:** Dreambooth Model + Extracted LoRA - **Language(s) (NLP):** EN - **License:** Creativeml-Openrail-M - **Parent Model:** beensad # How to Get Started with the Model Use the code below to get started with the model. ### EGROSS ###
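The card says to "use the code below", but the snippet itself is missing. A minimal sketch with 🤗 Diffusers is given here; treating "egross" as the Dreambooth trigger token and the fp16/CUDA settings are assumptions.

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the Dreambooth checkpoint; "egross" is assumed to be the trigger token.
pipe = StableDiffusionPipeline.from_pretrained("badmonk/egross", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("photo of egross, high quality").images[0]
image.save("egross.png")
```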
419
[ [ -0.0220794677734375, -0.037353515625, 0.01119232177734375, 0.0005645751953125, -0.076171875, -0.00004661083221435547, 0.0322265625, -0.03375244140625, 0.04608154296875, 0.042816162109375, -0.040374755859375, -0.05194091796875, -0.040435791015625, -0.03161621...
timm/densenet169.tv_in1k
2023-04-21T22:54:43.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1608.06993", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/densenet169.tv_in1k
0
1,986
timm
2023-04-21T22:54:20
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet169.tv_in1k A DenseNet image classification model. Trained on ImageNet-1k (original torchvision weights). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 14.1 - GMACs: 3.4 - Activations (M): 7.3 - Image size: 224 x 224 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet169.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet169.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1280, 14, 14]) # torch.Size([1, 1664, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet169.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1664, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ```
3,500
[ [ -0.0350341796875, -0.03729248046875, -0.0009918212890625, 0.0113372802734375, -0.0285491943359375, -0.0318603515625, -0.0260467529296875, -0.0228118896484375, 0.0221710205078125, 0.040374755859375, -0.02642822265625, -0.0489501953125, -0.054168701171875, -0....
Qwen/Qwen-VL
2023-10-31T12:45:43.000Z
[ "transformers", "pytorch", "qwen", "text-generation", "custom_code", "zh", "en", "arxiv:2308.12966", "has_space", "region:us" ]
text-generation
Qwen
null
null
Qwen/Qwen-VL
54
1,984
transformers
2023-08-18T02:20:59
--- language: - zh - en tags: - qwen pipeline_tag: text-generation inference: false --- # Qwen-VL <br> <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_vl.jpg" width="400"/> <p> <br> <p align="center"> Qwen-VL <a href="https://modelscope.cn/models/qwen/Qwen-VL/summary">🤖 <a> | <a href="https://huggingface.co/Qwen/Qwen-VL">🤗</a>&nbsp | Qwen-VL-Chat <a href="https://modelscope.cn/models/qwen/Qwen-VL-Chat/summary">🤖 <a>| <a href="https://huggingface.co/Qwen/Qwen-VL-Chat">🤗</a>&nbsp | Qwen-VL-Chat-Int4 <a href="https://huggingface.co/Qwen/Qwen-VL-Chat-Int4">🤗</a> <br> <a href="assets/wechat.png">WeChat</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://modelscope.cn/studios/qwen/Qwen-VL-Chat-Demo/summary">Demo</a>&nbsp | &nbsp<a href="https://arxiv.org/abs/2308.12966">Report</a> </p> <br> **Qwen-VL** 是阿里云研发的大规模视觉语言模型(Large Vision Language Model, LVLM)。Qwen-VL 可以以图像、文本、检测框作为输入,并以文本和检测框作为输出。Qwen-VL 系列模型性能强大,具备多语言对话、多图交错对话等能力,并支持中文开放域定位和细粒度图像识别与理解。 **Qwen-VL** (Qwen Large Vision Language Model) is the visual multimodal version of the large model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-VL accepts image, text, and bounding box as inputs, outputs text and bounding box. The features of Qwen-VL include: 目前,我们提供了Qwen-VL和Qwen-VL-Chat两个模型,分别为预训练模型和Chat模型。如果想了解更多关于模型的信息,请点击[链接](https://github.com/QwenLM/Qwen-VL/blob/master/visual_memo.md)查看我们的技术备忘录。本仓库为Qwen-VL仓库。 We release Qwen-VL and Qwen-VL-Chat, which are the pretrained model and Chat model respectively. For more details about Qwen-VL, please refer to our [technical memo](https://github.com/QwenLM/Qwen-VL/blob/master/visual_memo.md). This repo is the one for Qwen-VL. <br> ## 安装要求 (Requirements) * python 3.8及以上版本 * pytorch 1.12及以上版本,推荐2.0及以上版本 * 建议使用CUDA 11.4及以上(GPU用户需考虑此选项) * python 3.8 and above * pytorch 1.12 and above, 2.0 and above are recommended * CUDA 11.4 and above are recommended (this is for GPU users) <br> ## 快速开始 (Quickstart) 我们提供简单的示例来说明如何利用 🤗 Transformers 快速使用 Qwen-VL。 在开始前,请确保你已经配置好环境并安装好相关的代码包。最重要的是,确保你满足上述要求,然后安装相关的依赖库。 Below, we provide simple examples to show how to use Qwen-VL with 🤗 Transformers. Before running the code, make sure you have set up the environment and installed the required packages. Make sure you meet the above requirements, and then install the dependent libraries. ```bash pip install -r requirements.txt ``` 接下来你可以开始使用Transformers来使用我们的模型。关于视觉模块的更多用法,请参考[教程](TUTORIAL.md)。 Now you can start with Transformers. For more usage of the vision encoder, please refer to the [tutorial](TUTORIAL_zh.md). #### 🤗 Transformers To use Qwen-VL for inference, all you need to do is input a few lines of code as demonstrated below. 
However, **please make sure that you are using the latest code.** ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig import torch torch.manual_seed(1234) tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL", trust_remote_code=True) # use bf16 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL", device_map="auto", trust_remote_code=True, bf16=True).eval() # use fp16 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL", device_map="auto", trust_remote_code=True, fp16=True).eval() # use cpu only # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL", device_map="cpu", trust_remote_code=True).eval() # use cuda device model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL", device_map="cuda", trust_remote_code=True).eval() # Specify hyperparameters for generation (No need to do this if you are using transformers>=4.32.0) # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-VL", trust_remote_code=True) query = tokenizer.from_list_format([ {'image': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'}, {'text': 'Generate the caption in English with grounding:'}, ]) inputs = tokenizer(query, return_tensors='pt') inputs = inputs.to(model.device) pred = model.generate(**inputs) response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False) print(response) # <img>https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg</img>Generate the caption in English with grounding:<ref> Woman</ref><box>(451,379),(731,806)</box> and<ref> her dog</ref><box>(219,424),(576,896)</box> playing on the beach<|endoftext|> image = tokenizer.draw_bbox_on_latest_picture(response) if image: image.save('2.jpg') else: print("no box") ``` <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo_spotting_caption.jpg" width="500"/> <p> <br> ## 评测 我们从两个角度评测了两个模型的能力: 1. 在**英文标准 Benchmark** 上评测模型的基础任务能力。目前评测了四大类多模态任务: - Zero-shot Caption: 评测模型在未见过数据集上的零样本图片描述能力; - General VQA: 评测模型的通用问答能力,例如判断题、颜色、个数、类目等问答能力; - Text-based VQA:评测模型对于图片中文字相关的识别/问答能力,例如文档问答、图表问答、文字问答等; - Referring Expression Compression:评测模型给定物体描述画检测框的能力; 2. **试金石 (TouchStone)**:为了评测模型整体的图文对话能力和人类对齐水平。我们为此构建了一个基于 GPT4 打分来评测 LVLM 模型的 Benchmark:TouchStone。在 TouchStone-v0.1 中: - 评测基准总计涵盖 300+张图片、800+道题目、27个类别。包括基础属性问答、人物地标问答、影视作品问答、视觉推理、反事实推理、诗歌创作、故事写作,商品比较、图片解题等**尽可能广泛的类别**。 - 为了弥补目前 GPT4 无法直接读取图片的缺陷,我们给所有的带评测图片提供了**人工标注的充分详细描述**,并且将图片的详细描述、问题和模型的输出结果一起交给 GPT4 打分。 - 评测同时包含英文版本和中文版本。 评测结果如下: We evaluated the model's ability from two perspectives: 1. **Standard Benchmarks**: We evaluate the model's basic task capabilities on four major categories of multimodal tasks: - Zero-shot Caption: Evaluate model's zero-shot image captioning ability on unseen datasets; - General VQA: Evaluate the general question-answering ability of pictures, such as the judgment, color, number, category, etc; - Text-based VQA: Evaluate the model's ability to recognize text in pictures, such as document QA, chart QA, etc; - Referring Expression Comprehension: Evaluate the ability to localize a target object in an image described by a referring expression. 2. **TouchStone**: To evaluate the overall text-image dialogue capability and alignment level with humans, we have constructed a benchmark called TouchStone, which is based on scoring with GPT4 to evaluate the LVLM model. - The TouchStone benchmark covers a total of 300+ images, 800+ questions, and 27 categories. 
Such as attribute-based Q&A, celebrity recognition, writing poetry, summarizing multiple images, product comparison, math problem solving, etc; - In order to break the current limitation of GPT4 in terms of direct image input, TouchStone provides fine-grained image annotations by human labeling. These detailed annotations, along with the questions and the model's output, are then presented to GPT4 for scoring. - The benchmark includes both English and Chinese versions. The results of the evaluation are as follows: Qwen-VL outperforms current SOTA generalist models on multiple VL tasks and has a more comprehensive coverage in terms of capability range. <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/radar.png" width="600"/> <p> ### 零样本图像描述 & 通用视觉问答 (Zero-shot Captioning & General VQA) <table> <thead> <tr> <th rowspan="2">Model type</th> <th rowspan="2">Model</th> <th colspan="2">Zero-shot Captioning</th> <th colspan="5">General VQA</th> </tr> <tr> <th>NoCaps</th> <th>Flickr30K</th> <th>VQAv2<sup>dev</sup></th> <th>OK-VQA</th> <th>GQA</th> <th>SciQA-Img<br>(0-shot)</th> <th>VizWiz<br>(0-shot)</th> </tr> </thead> <tbody align="center"> <tr> <td rowspan="10">Generalist<br>Models</td> <td>Flamingo-9B</td> <td>-</td> <td>61.5</td> <td>51.8</td> <td>44.7</td> <td>-</td> <td>-</td> <td>28.8</td> </tr> <tr> <td>Flamingo-80B</td> <td>-</td> <td>67.2</td> <td>56.3</td> <td>50.6</td> <td>-</td> <td>-</td> <td>31.6</td> </tr> <tr> <td>Unified-IO-XL</td> <td>100.0</td> <td>-</td> <td>77.9</td> <td>54.0</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td>Kosmos-1</td> <td>-</td> <td>67.1</td> <td>51.0</td> <td>-</td> <td>-</td> <td>-</td> <td>29.2</td> </tr> <tr> <td>Kosmos-2</td> <td>-</td> <td>66.7</td> <td>45.6</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td>BLIP-2 (Vicuna-13B)</td> <td>103.9</td> <td>71.6</td> <td>65.0</td> <td>45.9</td> <td>32.3</td> <td>61.0</td> <td>19.6</td> </tr> <tr> <td>InstructBLIP (Vicuna-13B)</td> <td><strong>121.9</strong></td> <td>82.8</td> <td>-</td> <td>-</td> <td>49.5</td> <td>63.1</td> <td>33.4</td> </tr> <tr> <td>Shikra (Vicuna-13B)</td> <td>-</td> <td>73.9</td> <td>77.36</td> <td>47.16</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td><strong>Qwen-VL (Qwen-7B)</strong></td> <td>121.4</td> <td><b>85.8</b></td> <td><b>78.8</b></td> <td><b>58.6</b></td> <td><b>59.3</b></td> <td>67.1</td> <td>35.2</td> </tr> <!-- <tr> <td>Qwen-VL (4-shot)</td> <td>-</td> <td>-</td> <td>-</td> <td>63.6</td> <td>-</td> <td>-</td> <td>39.1</td> </tr> --> <tr> <td>Qwen-VL-Chat</td> <td>120.2</td> <td>81.0</td> <td>78.2</td> <td>56.6</td> <td>57.5</td> <td><b>68.2</b></td> <td><b>38.9</b></td> </tr> <!-- <tr> <td>Qwen-VL-Chat (4-shot)</td> <td>-</td> <td>-</td> <td>-</td> <td>60.6</td> <td>-</td> <td>-</td> <td>44.45</td> </tr> --> <tr> <td>Previous SOTA<br>(Per Task Fine-tuning)</td> <td>-</td> <td>127.0<br>(PALI-17B)</td> <td>84.5<br>(InstructBLIP<br>-FlanT5-XL)</td> <td>86.1<br>(PALI-X<br>-55B)</td> <td>66.1<br>(PALI-X<br>-55B)</td> <td>72.1<br>(CFR)</td> <td>92.53<br>(LLaVa+<br>GPT-4)</td> <td>70.9<br>(PALI-X<br>-55B)</td> </tr> </tbody> </table> - 在 Zero-shot Caption 中,Qwen-VL 在 Flickr30K 数据集上取得了 **SOTA** 的结果,并在 Nocaps 数据集上取得了和 InstructBlip 可竞争的结果。 - 在 General VQA 中,Qwen-VL 取得了 LVLM 模型同等量级和设定下 **SOTA** 的结果。 - For zero-shot image captioning, Qwen-VL achieves the **SOTA** on Flickr30K and competitive results on Nocaps with InstructBlip. 
- For general VQA, Qwen-VL achieves the **SOTA** under the same generalist LVLM scale settings. ### 文本导向的视觉问答 (Text-oriented VQA) <table> <thead> <tr> <th>Model type</th> <th>Model</th> <th>TextVQA</th> <th>DocVQA</th> <th>ChartQA</th> <th>AI2D</th> <th>OCR-VQA</th> </tr> </thead> <tbody align="center"> <tr> <td rowspan="5">Generalist Models</td> <td>BLIP-2 (Vicuna-13B)</td> <td>42.4</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td>InstructBLIP (Vicuna-13B)</td> <td>50.7</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td>mPLUG-DocOwl (LLaMA-7B)</td> <td>52.6</td> <td>62.2</td> <td>57.4</td> <td>-</td> <td>-</td> </tr> <tr> <td>Pic2Struct-Large (1.3B)</td> <td>-</td> <td><b>76.6</b></td> <td>58.6</td> <td>42.1</td> <td>71.3</td> </tr> <tr> <td>Qwen-VL (Qwen-7B)</td> <td><b>63.8</b></td> <td>65.1</td> <td><b>65.7</b></td> <td><b>62.3</b></td> <td><b>75.7</b></td> </tr> <tr> <td>Specialist SOTAs<br>(Specialist/Finetuned)</td> <td>PALI-X-55B (Single-task FT)<br>(Without OCR Pipeline)</td> <td>71.44</td> <td>80.0</td> <td>70.0</td> <td>81.2</td> <td>75.0</td> </tr> </tbody> </table> - 在文字相关的识别/问答评测上,取得了当前规模下通用 LVLM 达到的最好结果。 - 分辨率对上述某几个评测非常重要,大部分 224 分辨率的开源 LVLM 模型无法完成以上评测,或只能通过切图的方式解决。Qwen-VL 将分辨率提升到 448,可以直接以端到端的方式进行以上评测。Qwen-VL 在很多任务上甚至超过了 1024 分辨率的 Pic2Struct-Large 模型。 - In text-related recognition/QA evaluation, Qwen-VL achieves the SOTA under the generalist LVLM scale settings. - Resolution is important for several above evaluations. While most open-source LVLM models with 224 resolution are incapable of these evaluations or can only solve these by cutting images, Qwen-VL scales the resolution to 448 so that it can be evaluated end-to-end. Qwen-VL even outperforms Pic2Struct-Large models of 1024 resolution on some tasks. 
### 细粒度视觉定位 (Referring Expression Comprehension) <table> <thead> <tr> <th rowspan="2">Model type</th> <th rowspan="2">Model</th> <th colspan="3">RefCOCO</th> <th colspan="3">RefCOCO+</th> <th colspan="2">RefCOCOg</th> <th>GRIT</th> </tr> <tr> <th>val</th> <th>test-A</th> <th>test-B</th> <th>val</th> <th>test-A</th> <th>test-B</th> <th>val-u</th> <th>test-u</th> <th>refexp</th> </tr> </thead> <tbody align="center"> <tr> <td rowspan="8">Generalist Models</td> <td>GPV-2</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>51.50</td> </tr> <tr> <td>OFA-L*</td> <td>79.96</td> <td>83.67</td> <td>76.39</td> <td>68.29</td> <td>76.00</td> <td>61.75</td> <td>67.57</td> <td>67.58</td> <td>61.70</td> </tr> <tr> <td>Unified-IO</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td><b>78.61</b></td> </tr> <tr> <td>VisionLLM-H</td> <td></td> <td>86.70</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> <td>-</td> </tr> <tr> <td>Shikra-7B</td> <td>87.01</td> <td>90.61</td> <td>80.24 </td> <td>81.60</td> <td>87.36</td> <td>72.12</td> <td>82.27</td> <td>82.19</td> <td>69.34</td> </tr> <tr> <td>Shikra-13B</td> <td>87.83 </td> <td>91.11</td> <td>81.81</td> <td>82.89</td> <td>87.79</td> <td>74.41</td> <td>82.64</td> <td>83.16</td> <td>69.03</td> </tr> <tr> <td>Qwen-VL-7B</td> <td><b>89.36</b></td> <td>92.26</td> <td><b>85.34</b></td> <td><b>83.12</b></td> <td>88.25</td> <td><b>77.21</b></td> <td>85.58</td> <td>85.48</td> <td>78.22</td> </tr> <tr> <td>Qwen-VL-7B-Chat</td> <td>88.55</td> <td><b>92.27</b></td> <td>84.51</td> <td>82.82</td> <td><b>88.59</b></td> <td>76.79</td> <td><b>85.96</b></td> <td><b>86.32</b></td> <td>-</td> <tr> <td rowspan="3">Specialist SOTAs<br>(Specialist/Finetuned)</td> <td>G-DINO-L</td> <td>90.56&nbsp;&nbsp;</td> <td>93.19</td> <td>88.24</td> <td>82.75</td> <td>88.95</td> <td>75.92</td> <td>86.13</td> <td>87.02</td> <td>-</td> </tr> <tr> <td>UNINEXT-H</td> <td>92.64 </td> <td>94.33</td> <td>91.46</td> <td>85.24</td> <td>89.63</td> <td>79.79</td> <td>88.73</td> <td>89.37</td> <td>-</td> </tr> <tr> <td>ONE-PEACE</td> <td>92.58 </td> <td>94.18</td> <td>89.26</td> <td>88.77</td> <td>92.21</td> <td>83.23</td> <td>89.22</td> <td>89.27</td> <td>-</td> </tr> </tbody> </table> - 在定位任务上,Qwen-VL 全面超过 Shikra-13B,取得了目前 Generalist LVLM 模型上在 Refcoco 上的 **SOTA**。 - Qwen-VL 并没有在任何中文定位数据上训练过,但通过中文 Caption 数据和 英文 Grounding 数据的训练,可以 Zero-shot 泛化出中文 Grounding 能力。 我们提供了以上**所有**评测脚本以供复现我们的实验结果。请阅读 [eval/EVALUATION.md](eval/EVALUATION.md) 了解更多信息。 - Qwen-VL achieves the **SOTA** in all above referring expression comprehension benchmarks. - Qwen-VL has not been trained on any Chinese grounding data, but it can still generalize to the Chinese Grounding tasks in a zero-shot way by training Chinese Caption data and English Grounding data. We provide all of the above evaluation scripts for reproducing our experimental results. Please read [eval/EVALUATION.md](eval/EVALUATION.md) for more information. ### 闲聊能力测评 (Chat Evaluation) TouchStone 是一个基于 GPT4 打分来评测 LVLM 模型的图文对话能力和人类对齐水平的基准。它涵盖了 300+张图片、800+道题目、27个类别,包括基础属性、人物地标、视觉推理、诗歌创作、故事写作、商品比较、图片解题等**尽可能广泛的类别**。关于 TouchStone 的详细介绍,请参考[touchstone/README_CN.md](touchstone/README_CN.md)了解更多信息。 TouchStone is a benchmark based on scoring with GPT4 to evaluate the abilities of the LVLM model on text-image dialogue and alignment levels with humans. 
It covers a total of 300+ images, 800+ questions, and 27 categories, such as attribute-based Q&A, celebrity recognition, writing poetry, summarizing multiple images, product comparison, math problem solving, etc. Please read [touchstone/README.md](touchstone/README.md) for more information. #### 英语 (English) | Model | Score | |---------------|-------| | PandaGPT | 488.5 | | MiniGPT4 | 531.7 | | InstructBLIP | 552.4 | | LLaMA-AdapterV2 | 590.1 | | mPLUG-Owl | 605.4 | | LLaVA | 602.7 | | Qwen-VL-Chat | 645.2 | #### 中文 (Chinese) | Model | Score | |---------------|-------| | VisualGLM | 247.1 | | Qwen-VL-Chat | 401.2 | Qwen-VL-Chat 模型在中英文的对齐评测中均取得当前 LVLM 模型下的最好结果。 Qwen-VL-Chat has achieved the best results in both the Chinese and English alignment evaluations. <br> ## 常见问题 (FAQ) 如遇到问题,敬请查阅 [FAQ](https://github.com/QwenLM/Qwen-VL/blob/master/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。 If you run into problems, please refer to the [FAQ](https://github.com/QwenLM/Qwen-VL/blob/master/FAQ.md) and existing issues to search for a solution before opening a new issue. <br> ## 使用协议 (License Agreement) 研究人员与开发者可使用Qwen-VL和Qwen-VL-Chat或进行二次开发。我们同样允许商业使用,具体细节请查看[LICENSE](https://github.com/QwenLM/Qwen-VL/blob/master/LICENSE)。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/qianwen)申请。 Researchers and developers are free to use the code and model weights of both Qwen-VL and Qwen-VL-Chat. We also allow their commercial use. Check our license at [LICENSE](LICENSE) for more details. <br> ## 引用 (Citation) 如果你觉得我们的论文和代码对你的研究有帮助,请考虑:star: 和引用 :pencil: :) If you find our paper and code useful in your research, please consider giving a star :star: and citation :pencil: :) ```BibTeX @article{Qwen-VL, title={Qwen-VL: A Frontier Large Vision-Language Model with Versatile Abilities}, author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren}, journal={arXiv preprint arXiv:2308.12966}, year={2023} } ``` <br> ## 联系我们 (Contact Us) 如果你想给我们的研发团队和产品团队留言,请通过邮件(qianwen_opensource@alibabacloud.com)联系我们。 If you would like to leave a message for either our research team or product team, feel free to send an email to qianwen_opensource@alibabacloud.com.
18,726
[ [ -0.0382080078125, -0.0345458984375, 0.0150909423828125, 0.00739288330078125, -0.03131103515625, -0.01171875, 0.0025730133056640625, -0.045806884765625, -0.00327301025390625, 0.0273590087890625, -0.05487060546875, -0.046875, -0.029083251953125, 0.002872467041...
segmind/tiny-sd
2023-08-08T07:35:32.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "dataset:recastai/LAION-art-EN-improved-captions", "arxiv:2305.15798", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
segmind
null
null
segmind/tiny-sd
33
1,980
diffusers
2023-07-27T18:21:36
--- license: creativeml-openrail-m base_model: SG161222/Realistic_Vision_V4.0 datasets: - recastai/LAION-art-EN-improved-captions tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- # Text-to-image Distillation This pipeline was distilled from **SG161222/Realistic_Vision_V4.0** on a subset of the **recastai/LAION-art-EN-improved-captions** dataset. Below are some example images generated with the tiny-sd model. ![val_imgs_grid](./grid_tiny.png) This pipeline is based upon [the paper](https://arxiv.org/pdf/2305.15798.pdf). Training code can be found [here](https://github.com/segmind/distill-sd). ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("segmind/tiny-sd", torch_dtype=torch.float16) pipeline = pipeline.to("cuda") # fp16 weights need a GPU for inference prompt = "Portrait of a pretty girl" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Steps: 125000 * Learning rate: 1e-4 * Batch size: 32 * Gradient accumulation steps: 4 * Image resolution: 512 * Mixed-precision: fp16 ## Speed Comparison We have observed that the distilled models are up to 80% faster than the base SD1.5 models. Below is a comparison on an A100 80GB. ![graph](./graph.png) ![comparison](./comparision1.png) [Here](https://github.com/segmind/distill-sd/blob/master/inference.py) is the code for benchmarking the speeds.
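The linked inference script is the authoritative benchmark; as a rough sketch of how such a comparison might be reproduced, something like the following could be used. The baseline model ID, step count, and single-warmup timing scheme are assumptions, and absolute numbers depend on hardware (the card's figures come from an A100 80GB).

```python
import time
import torch
from diffusers import DiffusionPipeline

def time_pipeline(model_id: str, steps: int = 25) -> float:
    # Time one text-to-image call after a single warmup run.
    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
    pipe("Portrait of a pretty girl", num_inference_steps=steps)  # warmup
    torch.cuda.synchronize()
    start = time.time()
    pipe("Portrait of a pretty girl", num_inference_steps=steps)
    torch.cuda.synchronize()
    return time.time() - start

print("tiny-sd:", time_pipeline("segmind/tiny-sd"))
print("SD 1.5 :", time_pipeline("runwayml/stable-diffusion-v1-5"))
```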
1,512
[ [ -0.032562255859375, -0.033111572265625, 0.04571533203125, -0.00930023193359375, -0.051788330078125, -0.01290130615234375, -0.006626129150390625, -0.004055023193359375, -0.0019702911376953125, 0.033538818359375, -0.039703369140625, -0.0313720703125, -0.0591430664...
Luciano/xlm-roberta-large-finetuned-lener-br
2023-09-12T03:16:53.000Z
[ "transformers", "pytorch", "tensorboard", "safetensors", "xlm-roberta", "token-classification", "generated_from_trainer", "pt", "dataset:lener_br", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
Luciano
null
null
Luciano/xlm-roberta-large-finetuned-lener-br
1
1,978
transformers
2022-10-15T14:26:45
--- language: - pt license: mit tags: - generated_from_trainer datasets: - lener_br metrics: - precision - recall - f1 - accuracy base_model: xlm-roberta-large model-index: - name: xlm-roberta-large-finetuned-lener-br results: - task: type: token-classification name: Token Classification dataset: name: lener_br type: lener_br config: lener_br split: train args: lener_br metrics: - type: precision value: 0.8762313715584744 name: Precision - type: recall value: 0.8966141121736882 name: Recall - type: f1 value: 0.8863055697496168 name: F1 - type: accuracy value: 0.979500052295785 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-large-finetuned-lener-br This model is a fine-tuned version of [xlm-roberta-large](https://huggingface.co/xlm-roberta-large) on the lener_br dataset. It achieves the following results on the evaluation set: - Loss: nan - Precision: 0.8762 - Recall: 0.8966 - F1: 0.8863 - Accuracy: 0.9795 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0785 | 1.0 | 3914 | nan | 0.7119 | 0.8410 | 0.7711 | 0.9658 | | 0.076 | 2.0 | 7828 | nan | 0.8397 | 0.8679 | 0.8536 | 0.9740 | | 0.0434 | 3.0 | 11742 | nan | 0.8545 | 0.8666 | 0.8605 | 0.9693 | | 0.022 | 4.0 | 15656 | nan | 0.8293 | 0.8573 | 0.8431 | 0.9652 | | 0.0284 | 5.0 | 19570 | nan | 0.8789 | 0.8571 | 0.8678 | 0.9776 | | 0.029 | 6.0 | 23484 | nan | 0.8521 | 0.8788 | 0.8653 | 0.9771 | | 0.0227 | 7.0 | 27398 | nan | 0.7648 | 0.8873 | 0.8215 | 0.9686 | | 0.0219 | 8.0 | 31312 | nan | 0.8609 | 0.9026 | 0.8813 | 0.9780 | | 0.0121 | 9.0 | 35226 | nan | 0.8746 | 0.8979 | 0.8861 | 0.9812 | | 0.0087 | 10.0 | 39140 | nan | 0.8829 | 0.8827 | 0.8828 | 0.9808 | | 0.0081 | 11.0 | 43054 | nan | 0.8740 | 0.8749 | 0.8745 | 0.9765 | | 0.0058 | 12.0 | 46968 | nan | 0.8838 | 0.8842 | 0.8840 | 0.9788 | | 0.0044 | 13.0 | 50882 | nan | 0.869 | 0.8984 | 0.8835 | 0.9788 | | 0.002 | 14.0 | 54796 | nan | 0.8762 | 0.8966 | 0.8863 | 0.9795 | | 0.0017 | 15.0 | 58710 | nan | 0.8729 | 0.8982 | 0.8854 | 0.9791 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
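The auto-generated card carries no usage snippet; a minimal sketch with the `transformers` token-classification pipeline is given below (the Portuguese example sentence is illustrative).

```python
from transformers import pipeline

# Aggregate sub-word predictions into whole-entity spans.
ner = pipeline(
    "token-classification",
    model="Luciano/xlm-roberta-large-finetuned-lener-br",
    aggregation_strategy="simple",
)
print(ner("Acordam os Desembargadores do Tribunal de Justiça do Estado de São Paulo."))
```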
3,418
[ [ -0.04156494140625, -0.049041748046875, 0.0162353515625, 0.00699615478515625, -0.01218414306640625, -0.0128326416015625, -0.01139068603515625, -0.01020050048828125, 0.035400390625, 0.028289794921875, -0.048370361328125, -0.054595947265625, -0.056243896484375, ...
neuraly/bert-base-italian-cased-sentiment
2021-09-22T09:29:18.000Z
[ "transformers", "pytorch", "tf", "jax", "bert", "text-classification", "sentiment", "Italian", "it", "license:mit", "endpoints_compatible", "region:us" ]
text-classification
neuraly
null
null
neuraly/bert-base-italian-cased-sentiment
7
1,977
transformers
2022-03-02T23:29:05
--- language: it thumbnail: https://neuraly.ai/static/assets/images/huggingface/thumbnail.png tags: - sentiment - Italian license: mit widget: - text: Huggingface è un team fantastico! --- # 🤗 + neuraly - Italian BERT Sentiment model ## Model description This model performs sentiment analysis on Italian sentences. It was trained starting from an instance of [bert-base-italian-cased](https://huggingface.co/dbmdz/bert-base-italian-cased), and fine-tuned on an Italian dataset of tweets, reaching 82% accuracy on the latter. ## Intended uses & limitations #### How to use ```python import torch from torch import nn from transformers import AutoTokenizer, AutoModelForSequenceClassification # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained("neuraly/bert-base-italian-cased-sentiment") # Load the model, use .cuda() to load it on the GPU model = AutoModelForSequenceClassification.from_pretrained("neuraly/bert-base-italian-cased-sentiment") sentence = 'Huggingface è un team fantastico!' input_ids = tokenizer.encode(sentence, add_special_tokens=True) # Create tensor, use .cuda() to transfer the tensor to GPU tensor = torch.tensor(input_ids).long() # Fake batch dimension tensor = tensor.unsqueeze(0) # Call the model and get the logits logits, = model(tensor) # Remove the fake batch dimension logits = logits.squeeze(0) # The model was trained with a Log Likelihood + Softmax combined loss, hence to extract probabilities we need a softmax on top of the logits tensor proba = nn.functional.softmax(logits, dim=0) # Unpack the tensor to obtain negative, neutral and positive probabilities negative, neutral, positive = proba ``` #### Limitations and bias A possible drawback (or bias) of this model is related to the fact that it was trained on a tweet dataset, with all the limitations that come with it. The domain is strongly related to football players and teams, but it works surprisingly well even on other topics. ## Training data We trained the model by combining the two tweet datasets taken from [Sentipolc EVALITA 2016](http://www.di.unito.it/~tutreeb/sentipolc-evalita16/data.html). Overall the dataset consists of 45K pre-processed tweets. The model weights come from a pre-trained instance of [bert-base-italian-cased](https://huggingface.co/dbmdz/bert-base-italian-cased). A huge "thank you" goes to that team, brilliant work! ## Training procedure #### Preprocessing We tried to save as much information as possible, since BERT captures extremely well the semantics of complex text sequences. Overall we removed only **@mentions**, **urls** and **emails** from every tweet and kept pretty much everything else. #### Hardware - **GPU**: Nvidia GTX1080ti - **CPU**: AMD Ryzen7 3700x 8c/16t - **RAM**: 64GB DDR4 #### Hyperparameters - Optimizer: **AdamW** with learning rate of **2e-5**, epsilon of **1e-8** - Max epochs: **5** - Batch size: **32** - Early Stopping: **enabled** with patience = 1 Early stopping was triggered after 3 epochs. ## Eval results The model achieves an overall accuracy on the test set equal to 82%. The test set is a 20% split of the whole dataset. ## About us [Neuraly](https://neuraly.ai) is a young and dynamic startup committed to designing AI-driven solutions and services through the most advanced Machine Learning and Data Science technologies. You can find out more about who we are and what we do on our [website](https://neuraly.ai). 
## Acknowledgments Thanks to the generous support from the [Hugging Face](https://huggingface.co/) team, it is possible to download the model from their S3 storage and live test it from their inference API 🤗.
3,691
[ [ -0.019927978515625, -0.0645751953125, 0.0158233642578125, 0.034027099609375, -0.0205535888671875, 0.001491546630859375, -0.0278167724609375, -0.03369140625, 0.0369873046875, -0.000003337860107421875, -0.047210693359375, -0.05621337890625, -0.0469970703125, -...
timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384
2023-03-31T21:59:42.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:laion-2b", "arxiv:2210.08402", "arxiv:2201.03545", "arxiv:2103.00020", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384
3
1,976
timm
2023-03-31T21:58:31
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - laion-2b --- # Model card for convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384 A ConvNeXt image classification model. CLIP image tower weights pretrained in [OpenCLIP](https://github.com/mlfoundations/open_clip) on LAION and fine-tuned on ImageNet-12k followed by ImageNet-1k in `timm` by Ross Wightman. Please see related OpenCLIP model cards for more details on pretrain: * https://huggingface.co/laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup * https://huggingface.co/laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg * https://huggingface.co/laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg * https://huggingface.co/laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 88.6 - GMACs: 45.2 - Activations (M): 84.5 - Image size: 384 x 384 - **Papers:** - LAION-5B: An open large-scale dataset for training next generation image-text models: https://arxiv.org/abs/2210.08402 - A ConvNet for the 2020s: https://arxiv.org/abs/2201.03545 - Learning Transferable Visual Models From Natural Language Supervision: https://arxiv.org/abs/2103.00020 - **Original:** https://github.com/mlfoundations/open_clip - **Pretrain Dataset:** LAION-2B - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 96, 96]) # torch.Size([1, 256, 48, 48]) # torch.Size([1, 512, 24, 24]) # torch.Size([1, 1024, 12, 12]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = 
timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 12, 12) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). All timing numbers from eager model PyTorch 1.13 on RTX 3090 w/ AMP. | model |top1 |top5 |img_size|param_count|gmacs |macts |samples_per_sec|batch_size| |------------------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------|---------------|----------| | [convnextv2_huge.fcmae_ft_in22k_in1k_512](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_512) |88.848|98.742|512 |660.29 |600.81|413.07|28.58 |48 | | [convnextv2_huge.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in22k_in1k_384) |88.668|98.738|384 |660.29 |337.96|232.35|50.56 |64 | | [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45|122.45 |256 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384) |88.312|98.578|384 |200.13 |101.11|126.74|196.84 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k_384) |88.196|98.532|384 |197.96 |101.1 |126.74|128.94 |128 | | [convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320) |87.968|98.47 |320 |200.13 |70.21 |88.02 |283.42 |256 | | [convnext_xlarge.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k_384) |87.75 |98.556|384 |350.2 |179.2 |168.99|124.85 |192 | | [convnextv2_base.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k_384) |87.646|98.422|384 |88.72 |45.21 |84.49 |209.51 |256 | | [convnext_large.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k_384) |87.476|98.382|384 |197.77 |101.1 |126.74|194.66 |256 | | [convnext_large_mlp.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_large_mlp.clip_laion2b_augreg_ft_in1k) |87.344|98.218|256 |200.13 |44.94 |56.33 |438.08 |256 | | [convnextv2_large.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in22k_in1k) |87.26 |98.248|224 |197.96 |34.4 |43.13 |376.84 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384) |87.138|98.212|384 |88.59 |45.21 |84.49 |365.47 |256 | | [convnext_xlarge.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_xlarge.fb_in22k_ft_in1k) |87.002|98.208|224 |350.2 |60.98 |57.5 |368.01 |256 | | [convnext_base.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k_384) |86.796|98.264|384 |88.59 |45.21 |84.49 |366.54 |256 | | [convnextv2_base.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in22k_in1k) |86.74 |98.022|224 |88.72 |15.38 |28.75 |624.23 |256 | | 
[convnext_large.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_large.fb_in22k_ft_in1k) |86.636|98.028|224 |197.77 |34.4 |43.13 |581.43 |256 | | [convnext_base.clip_laiona_augreg_ft_in1k_384](https://huggingface.co/timm/convnext_base.clip_laiona_augreg_ft_in1k_384) |86.504|97.97 |384 |88.59 |45.21 |84.49 |368.14 |256 | | [convnext_base.clip_laion2b_augreg_ft_in12k_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k) |86.344|97.97 |256 |88.59 |20.09 |37.55 |816.14 |256 | | [convnextv2_huge.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_huge.fcmae_ft_in1k) |86.256|97.75 |224 |660.29 |115.0 |79.07 |154.72 |256 | | [convnext_small.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_small.in12k_ft_in1k_384) |86.182|97.92 |384 |50.22 |25.58 |63.37 |516.19 |256 | | [convnext_base.clip_laion2b_augreg_ft_in1k](https://huggingface.co/timm/convnext_base.clip_laion2b_augreg_ft_in1k) |86.154|97.68 |256 |88.59 |20.09 |37.55 |819.86 |256 | | [convnext_base.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_base.fb_in22k_ft_in1k) |85.822|97.866|224 |88.59 |15.38 |28.75 |1037.66 |256 | | [convnext_small.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k_384) |85.778|97.886|384 |50.22 |25.58 |63.37 |518.95 |256 | | [convnextv2_large.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_large.fcmae_ft_in1k) |85.742|97.584|224 |197.96 |34.4 |43.13 |375.23 |256 | | [convnext_small.in12k_ft_in1k](https://huggingface.co/timm/convnext_small.in12k_ft_in1k) |85.174|97.506|224 |50.22 |8.71 |21.56 |1474.31 |256 | | [convnext_tiny.in12k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k_384) |85.118|97.608|384 |28.59 |13.14 |39.48 |856.76 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k_384) |85.112|97.63 |384 |28.64 |13.14 |39.48 |491.32 |256 | | [convnextv2_base.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_base.fcmae_ft_in1k) |84.874|97.09 |224 |88.72 |15.38 |28.75 |625.33 |256 | | [convnext_small.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_small.fb_in22k_ft_in1k) |84.562|97.394|224 |50.22 |8.71 |21.56 |1478.29 |256 | | [convnext_large.fb_in1k](https://huggingface.co/timm/convnext_large.fb_in1k) |84.282|96.892|224 |197.77 |34.4 |43.13 |584.28 |256 | | [convnext_tiny.in12k_ft_in1k](https://huggingface.co/timm/convnext_tiny.in12k_ft_in1k) |84.186|97.124|224 |28.59 |4.47 |13.44 |2433.7 |256 | | [convnext_tiny.fb_in22k_ft_in1k_384](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k_384) |84.084|97.14 |384 |28.59 |13.14 |39.48 |862.95 |256 | | [convnextv2_tiny.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in22k_in1k) |83.894|96.964|224 |28.64 |4.47 |13.44 |1452.72 |256 | | [convnext_base.fb_in1k](https://huggingface.co/timm/convnext_base.fb_in1k) |83.82 |96.746|224 |88.59 |15.38 |28.75 |1054.0 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k_384](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k_384) |83.37 |96.742|384 |15.62 |7.22 |24.61 |801.72 |256 | | [convnext_small.fb_in1k](https://huggingface.co/timm/convnext_small.fb_in1k) |83.142|96.434|224 |50.22 |8.71 |21.56 |1464.0 |256 | | [convnextv2_tiny.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_tiny.fcmae_ft_in1k) |82.92 |96.284|224 |28.64 |4.47 |13.44 |1425.62 |256 | | [convnext_tiny.fb_in22k_ft_in1k](https://huggingface.co/timm/convnext_tiny.fb_in22k_ft_in1k) |82.898|96.616|224 |28.59 |4.47 |13.44 |2480.88 |256 | | 
[convnext_nano.in12k_ft_in1k](https://huggingface.co/timm/convnext_nano.in12k_ft_in1k) |82.282|96.344|224 |15.59 |2.46 |8.37 |3926.52 |256 | | [convnext_tiny_hnf.a2h_in1k](https://huggingface.co/timm/convnext_tiny_hnf.a2h_in1k) |82.216|95.852|224 |28.59 |4.47 |13.44 |2529.75 |256 | | [convnext_tiny.fb_in1k](https://huggingface.co/timm/convnext_tiny.fb_in1k) |82.066|95.854|224 |28.59 |4.47 |13.44 |2346.26 |256 | | [convnextv2_nano.fcmae_ft_in22k_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in22k_in1k) |82.03 |96.166|224 |15.62 |2.46 |8.37 |2300.18 |256 | | [convnextv2_nano.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_nano.fcmae_ft_in1k) |81.83 |95.738|224 |15.62 |2.46 |8.37 |2321.48 |256 | | [convnext_nano_ols.d1h_in1k](https://huggingface.co/timm/convnext_nano_ols.d1h_in1k) |80.866|95.246|224 |15.65 |2.65 |9.38 |3523.85 |256 | | [convnext_nano.d1h_in1k](https://huggingface.co/timm/convnext_nano.d1h_in1k) |80.768|95.334|224 |15.59 |2.46 |8.37 |3915.58 |256 | | [convnextv2_pico.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_pico.fcmae_ft_in1k) |80.304|95.072|224 |9.07 |1.37 |6.1 |3274.57 |256 | | [convnext_pico.d1_in1k](https://huggingface.co/timm/convnext_pico.d1_in1k) |79.526|94.558|224 |9.05 |1.37 |6.1 |5686.88 |256 | | [convnext_pico_ols.d1_in1k](https://huggingface.co/timm/convnext_pico_ols.d1_in1k) |79.522|94.692|224 |9.06 |1.43 |6.5 |5422.46 |256 | | [convnextv2_femto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_femto.fcmae_ft_in1k) |78.488|93.98 |224 |5.23 |0.79 |4.57 |4264.2 |256 | | [convnext_femto_ols.d1_in1k](https://huggingface.co/timm/convnext_femto_ols.d1_in1k) |77.86 |93.83 |224 |5.23 |0.82 |4.87 |6910.6 |256 | | [convnext_femto.d1_in1k](https://huggingface.co/timm/convnext_femto.d1_in1k) |77.454|93.68 |224 |5.22 |0.79 |4.57 |7189.92 |256 | | [convnextv2_atto.fcmae_ft_in1k](https://huggingface.co/timm/convnextv2_atto.fcmae_ft_in1k) |76.664|93.044|224 |3.71 |0.55 |3.81 |4728.91 |256 | | [convnext_atto_ols.a2_in1k](https://huggingface.co/timm/convnext_atto_ols.a2_in1k) |75.88 |92.846|224 |3.7 |0.58 |4.11 |7963.16 |256 | | [convnext_atto.d2_in1k](https://huggingface.co/timm/convnext_atto.d2_in1k) |75.664|92.9 |224 |3.7 |0.55 |3.81 |8439.22 |256 | ## Citation ```bibtex @software{ilharco_gabriel_2021_5143773, author = {Ilharco, Gabriel and Wortsman, Mitchell and Wightman, Ross and Gordon, Cade and Carlini, Nicholas and Taori, Rohan and Dave, Achal and Shankar, Vaishaal and Namkoong, Hongseok and Miller, John and Hajishirzi, Hannaneh and Farhadi, Ali and Schmidt, Ludwig}, title = {OpenCLIP}, month = jul, year = 2021, note = {If you use this software, please cite it as below.}, publisher = {Zenodo}, version = {0.1}, doi = {10.5281/zenodo.5143773}, url = {https://doi.org/10.5281/zenodo.5143773} } ``` ```bibtex @inproceedings{schuhmann2022laionb, title={{LAION}-5B: An open large-scale dataset for training next generation image-text models}, author={Christoph Schuhmann and Romain Beaumont and Richard Vencu and Cade W Gordon and Ross Wightman and Mehdi Cherti and Theo Coombes and Aarush Katta and Clayton Mullis and Mitchell Wortsman and Patrick Schramowski and Srivatsa R Kundurthy and Katherine Crowson and Ludwig Schmidt and Robert Kaczmarczyk and Jenia Jitsev}, booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track}, year={2022}, url={https://openreview.net/forum?id=M3Y74vmsMcY} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = 
{GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{Radford2021LearningTV, title={Learning Transferable Visual Models From Natural Language Supervision}, author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever}, booktitle={ICML}, year={2021} } ``` ```bibtex @article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } ```
18,566
[ [ -0.060302734375, -0.035888671875, -0.002239227294921875, 0.034942626953125, -0.031494140625, -0.0179595947265625, -0.01480865478515625, -0.033843994140625, 0.05731201171875, 0.020538330078125, -0.042999267578125, -0.0450439453125, -0.0535888671875, -0.003265...
sentence-transformers/distiluse-base-multilingual-cased
2022-06-21T14:55:22.000Z
[ "sentence-transformers", "pytorch", "tf", "rust", "distilbert", "feature-extraction", "sentence-similarity", "transformers", "multilingual", "arxiv:1908.10084", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
sentence-transformers
null
null
sentence-transformers/distiluse-base-multilingual-cased
9
1,974
sentence-transformers
2022-03-02T23:29:05
--- pipeline_tag: sentence-similarity language: multilingual license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/distiluse-base-multilingual-cased This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('sentence-transformers/distiluse-base-multilingual-cased') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/distiluse-base-multilingual-cased) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors This model was trained by [sentence-transformers](https://www.sbert.net/). If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
2,369
[ [ -0.0186004638671875, -0.061737060546875, 0.0277099609375, 0.034210205078125, -0.02392578125, -0.017059326171875, -0.02130126953125, 0.0100555419921875, 0.0158233642578125, 0.02703857421875, -0.0411376953125, -0.039581298828125, -0.04840087890625, 0.015464782...
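The card above encodes sentences but stops short of the semantic-search use case it mentions. A minimal sketch of that use case, assuming a recent sentence-transformers release where the cosine-similarity helper is `util.cos_sim` (older releases call it `util.pytorch_cos_sim`); the corpus and query are hypothetical:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/distiluse-base-multilingual-cased")

# Hypothetical multilingual corpus and query, since the model is multilingual.
corpus = [
    "Der Hund spielt im Garten.",
    "The cat sleeps on the sofa.",
    "La voiture est rouge.",
]
query = "A dog is playing outside."

# Encode once, then rank corpus sentences by cosine similarity to the query.
corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)
scores = util.cos_sim(query_embedding, corpus_embeddings)[0]

for sentence, score in sorted(zip(corpus, scores.tolist()), key=lambda pair: -pair[1]):
    print(f"{score:.3f}  {sentence}")
```

The German dog sentence should rank first despite the English query, which is the point of the shared multilingual vector space.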
Yntec/BasilRemix
2023-09-09T14:08:34.000Z
[ "diffusers", "Anime", "3D", "Illustration", "nuigurumi", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Yntec
null
null
Yntec/BasilRemix
0
1,973
diffusers
2023-09-08T02:14:20
--- license: other library_name: diffusers pipeline_tag: text-to-image tags: - Anime - 3D - Illustration - nuigurumi - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers --- # Basil Remix BasilMix mixed with ReVAnimated v11 to bring its compositions back to life! It has the MoistMixV2VAE baked in. Comparison: ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/OxgKIXdKMCQujcEHYkqlp.png) (Click for larger) Sample and prompt: ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/Klj0dHwDyLi_caXyAh_6l.png) Pretty detailed CUTE Girl, Cartoon, sitting on a computer monitor, holding antique TV, DETAILED CHIBI EYES, gorgeous detailed hair, Magazine ad, iconic, 1940, sharp focus. Illustration By KlaysMoji and artgerm and Clay Mann and leyendecker and kyoani Original page: https://huggingface.co/nuigurumi/basil_mix # Recipe - SuperMerger Weight sum Train Difference Use MBW 0,1,1,1,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,1,1,1,1 Model A: BasilMix Model B: ReVAnimated v11 Output Model: BasilRemix
1,130
[ [ -0.018707275390625, -0.018829345703125, -0.0007681846618652344, 0.030853271484375, -0.0240020751953125, 0.017181396484375, 0.0072784423828125, -0.0009436607360839844, 0.07659912109375, 0.068359375, -0.04425048828125, 0.0067138671875, -0.0263824462890625, -0....
WizardLM/WizardCoder-Python-13B-V1.0
2023-09-09T06:43:58.000Z
[ "transformers", "pytorch", "llama", "text-generation", "code", "arxiv:2304.12244", "arxiv:2306.08568", "arxiv:2308.09583", "arxiv:2303.08774", "license:llama2", "model-index", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
WizardLM
null
null
WizardLM/WizardCoder-Python-13B-V1.0
76
1,968
transformers
2023-08-26T04:59:34
--- license: llama2 metrics: - code_eval library_name: transformers tags: - code model-index: - name: WizardCoder-Python-13B-V1.0 results: - task: type: text-generation dataset: type: openai_humaneval name: HumanEval metrics: - name: pass@1 type: pass@1 value: 0.64 verified: false --- <p align="center"> 🤗 <a href="https://huggingface.co/WizardLM" target="_blank">HF Repo</a> •🐱 <a href="https://github.com/nlpxucan/WizardLM" target="_blank">Github Repo</a> • 🐦 <a href="https://twitter.com/WizardLM_AI" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/abs/2304.12244" target="_blank">[WizardLM]</a> • 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> • 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a> <br> </p> <p align="center"> 👋 Join our <a href="https://discord.gg/VZjjHtWrKs" target="_blank">Discord</a> </p> ## News - 🔥🔥🔥[2023/08/26] We released **WizardCoder-Python-34B-V1.0**, which achieves **73.2 pass@1** and surpasses **GPT4 (2023/03/15)**, **ChatGPT-3.5**, and **Claude2** on the [HumanEval Benchmarks](https://github.com/openai/human-eval). - [2023/06/16] We released **WizardCoder-15B-V1.0**, which achieves **57.3 pass@1** and surpasses **Claude-Plus (+6.8)**, **Bard (+15.3)** and **InstructCodeT5+ (+22.3)** on the [HumanEval Benchmarks](https://github.com/openai/human-eval). ❗Note: There are two sets of HumanEval results for GPT4 and ChatGPT-3.5. The 67.0 and 48.1 are reported in the official GPT4 report (2023/03/15) from [OpenAI](https://arxiv.org/abs/2303.08774). The 82.0 and 72.5 were measured by us with the latest API (2023/08/26). | Model | Checkpoint | Paper | HumanEval | MBPP | Demo | License | | ----- |------| ---- |------|-------| ----- | ----- | | WizardCoder-Python-34B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 73.2 | 61.2 | [Demo](http://47.103.63.15:50085/) | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-15B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-15B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 59.8 |50.6 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | | WizardCoder-Python-13B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-13B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 64.0 | 55.6 | -- | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-Python-7B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-7B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 55.5 | 51.6 | [Demo](http://47.103.63.15:50088/) | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-3B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-3B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 34.8 |37.4 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | | WizardCoder-1B-V1.0 | 🤗 <a
href="https://huggingface.co/WizardLM/WizardCoder-1B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 23.8 |28.6 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | - Our **WizardMath-70B-V1.0** model slightly outperforms some closed-source LLMs on the GSM8K, including **ChatGPT 3.5**, **Claude Instant 1** and **PaLM 2 540B**. - Our **WizardMath-70B-V1.0** model achieves **81.6 pass@1** on the [GSM8k Benchmarks](https://github.com/openai/grade-school-math), which is **24.8** points higher than the SOTA open-source LLM, and achieves **22.7 pass@1** on the [MATH Benchmarks](https://github.com/hendrycks/math), which is **9.2** points higher than the SOTA open-source LLM. <font size=4> | Model | Checkpoint | Paper | GSM8k | MATH |Online Demo| License| | ----- |------| ---- |------|-------| ----- | ----- | | WizardMath-70B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-70B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **81.6** | **22.7** |[Demo](http://47.103.63.15:50083/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a> | | WizardMath-13B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-13B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **63.9** | **14.0** |[Demo](http://47.103.63.15:50082/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a> | | WizardMath-7B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-7B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **54.9** | **10.7** | [Demo ](http://47.103.63.15:50080/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a>| </font> - [08/09/2023] We released **WizardLM-70B-V1.0** model. Here is [Full Model Weight](https://huggingface.co/WizardLM/WizardLM-70B-V1.0). 
<font size=4> | <sup>Model</sup> | <sup>Checkpoint</sup> | <sup>Paper</sup> |<sup>MT-Bench</sup> | <sup>AlpacaEval</sup> | <sup>GSM8k</sup> | <sup>HumanEval</sup> | <sup>License</sup>| | ----- |------| ---- |------|-------| ----- | ----- | ----- | | <sup>**WizardLM-70B-V1.0**</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-70B-V1.0" target="_blank">HF Link</a> </sup>|<sup>📃**Coming Soon**</sup>| <sup>**7.78**</sup> | <sup>**92.91%**</sup> |<sup>**77.6%**</sup> | <sup> **50.6**</sup>|<sup> <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 License </a></sup> | | <sup>WizardLM-13B-V1.2</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.2" target="_blank">HF Link</a> </sup>| | <sup>7.06</sup> | <sup>89.17%</sup> |<sup>55.3%</sup> | <sup>36.6 </sup>|<sup> <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 License </a></sup> | | <sup>WizardLM-13B-V1.1</sup> |<sup> 🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.1" target="_blank">HF Link</a> </sup> | | <sup>6.76</sup> |<sup>86.32%</sup> | | <sup>25.0 </sup>| <sup>Non-commercial</sup>| | <sup>WizardLM-30B-V1.0</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-30B-V1.0" target="_blank">HF Link</a></sup> | | <sup>7.01</sup> | | | <sup>37.8 </sup>| <sup>Non-commercial</sup> | | <sup>WizardLM-13B-V1.0</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.0" target="_blank">HF Link</a> </sup> | | <sup>6.35</sup> | <sup>75.31%</sup> | | <sup> 24.0 </sup> | <sup>Non-commercial</sup>| | <sup>WizardLM-7B-V1.0 </sup>| <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-7B-V1.0" target="_blank">HF Link</a> </sup> |<sup> 📃 <a href="https://arxiv.org/abs/2304.12244" target="_blank">[WizardLM]</a> </sup>| | | |<sup>19.1 </sup>|<sup> Non-commercial</sup>| </font> ## Comparing WizardCoder-Python-34B-V1.0 with Other LLMs. 🔥 The following figure shows that our **WizardCoder-Python-34B-V1.0 attains the second position in this benchmark**, surpassing GPT4 (2023/03/15, 73.2 vs. 67.0), ChatGPT-3.5 (73.2 vs. 72.5) and Claude2 (73.2 vs. 71.2). <p align="center" width="100%"> <a ><img src="https://raw.githubusercontent.com/nlpxucan/WizardLM/main/WizardCoder/imgs/compare_sota.png" alt="WizardCoder" style="width: 96%; min-width: 300px; display: block; margin: auto;"></a> </p> ## Prompt Format ``` "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:" ``` ## Inference Demo Script We provide the inference demo code [here](https://github.com/nlpxucan/WizardLM/tree/main/demo). Note: This script supports `WizardLM/WizardCoder-Python-34B/13B/7B-V1.0`. If you want to run inference with `WizardLM/WizardCoder-15B/3B/1B-V1.0`, please change the `stop_tokens = ['</s>']` to `stop_tokens = ['<|endoftext|>']` in the script. ## Citation Please cite the repo if you use the data, method, or code in this repo. ``` @article{luo2023wizardcoder, title={WizardCoder: Empowering Code Large Language Models with Evol-Instruct}, author={Luo, Ziyang and Xu, Can and Zhao, Pu and Sun, Qingfeng and Geng, Xiubo and Hu, Wenxiang and Tao, Chongyang and Ma, Jing and Lin, Qingwei and Jiang, Daxin}, journal={arXiv preprint arXiv:2306.08568}, year={2023} } ```
9,365
[ [ -0.047760009765625, -0.033416748046875, -0.006984710693359375, 0.0266265869140625, 0.00334930419921875, -0.01239013671875, 0.002605438232421875, -0.03607177734375, 0.015838623046875, 0.021209716796875, -0.048858642578125, -0.0469970703125, -0.03863525390625, ...
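The WizardCoder card above specifies the Alpaca-style prompt template and links an official demo script; for readers who just want plain `transformers`, here is a minimal generation sketch. The bfloat16 dtype, `device_map="auto"` (which needs `accelerate`), the greedy decoding settings, and the sample instruction are our assumptions, not part of the card:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "WizardLM/WizardCoder-Python-13B-V1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Fill the card's prompt template with a hypothetical instruction.
instruction = "Write a Python function that checks whether a string is a palindrome."
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    f"### Instruction:\n{instruction}\n\n### Response:"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256, do_sample=False)

# Print only the newly generated tokens, i.e. the model's response.
print(tokenizer.decode(output[0][inputs.input_ids.shape[1]:], skip_special_tokens=True))
```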
microsoft/BiomedVLP-CXR-BERT-general
2022-07-11T14:52:52.000Z
[ "transformers", "pytorch", "bert", "fill-mask", "exbert", "en", "arxiv:2204.09817", "arxiv:2103.00020", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
microsoft
null
null
microsoft/BiomedVLP-CXR-BERT-general
22
1,965
transformers
2022-05-05T22:10:13
--- language: en tags: - exbert license: mit widget: - text: "Left pleural effusion with adjacent [MASK]." example_title: "Radiology 1" - text: "Heart size normal and lungs are [MASK]." example_title: "Radiology 2" - text: "[MASK] is a tumor suppressor gene." example_title: "Biomedical" - text: "The patient was on [MASK] for chronic atrial fibrillation" example_title: "Medication" --- # CXR-BERT-general [CXR-BERT](https://arxiv.org/abs/2204.09817) is a chest X-ray (CXR) domain-specific language model that makes use of an improved vocabulary, novel pretraining procedure, weight regularization, and text augmentations. The resulting model demonstrates improved performance on radiology natural language inference, radiology masked language model token prediction, and downstream vision-language processing tasks such as zero-shot phrase grounding and image classification. First, we pretrain **CXR-BERT-general** from a randomly initialized BERT model via Masked Language Modeling (MLM) on abstracts from [PubMed](https://pubmed.ncbi.nlm.nih.gov/) and clinical notes from the publicly-available [MIMIC-III](https://physionet.org/content/mimiciii/1.4/) and [MIMIC-CXR](https://physionet.org/content/mimic-cxr/). In that regard, the general model is expected to be applicable for research in clinical domains other than chest radiology through domain-specific fine-tuning. **CXR-BERT-specialized** is continually pretrained from CXR-BERT-general to further specialize in the chest X-ray domain. At the final stage, CXR-BERT is trained in a multi-modal contrastive learning framework, similar to the [CLIP](https://arxiv.org/abs/2103.00020) framework. The latent representation of the [CLS] token is utilized to align text/image embeddings. ## Model variations | Model | Model identifier on HuggingFace | Vocabulary | Note | | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | -------------- | --------------------------------------------------------- | | CXR-BERT-general | [microsoft/BiomedVLP-CXR-BERT-general](https://huggingface.co/microsoft/BiomedVLP-CXR-BERT-general) | PubMed & MIMIC | Pretrained for biomedical literature and clinical domains | | CXR-BERT-specialized (after multi-modal training) | [microsoft/BiomedVLP-CXR-BERT-specialized](https://huggingface.co/microsoft/BiomedVLP-CXR-BERT-specialized) | PubMed & MIMIC | Pretrained for chest X-ray domain | ## Citation The corresponding manuscript has been accepted for presentation at the [**European Conference on Computer Vision (ECCV) 2022**](https://eccv2022.ecva.net/) ```bibtex @misc{https://doi.org/10.48550/arxiv.2204.09817, doi = {10.48550/ARXIV.2204.09817}, url = {https://arxiv.org/abs/2204.09817}, author = {Boecking, Benedikt and Usuyama, Naoto and Bannur, Shruthi and Castro, Daniel C. and Schwaighofer, Anton and Hyland, Stephanie and Wetscherek, Maria and Naumann, Tristan and Nori, Aditya and Alvarez-Valle, Javier and Poon, Hoifung and Oktay, Ozan}, title = {Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing}, publisher = {arXiv}, year = {2022}, } ``` ## Model Use ### Intended Use This model is intended to be used solely for (I) future research on visual-language processing and (II) reproducibility of the experimental results reported in the reference paper. #### Primary Intended Use The primary intended use is to support AI researchers building on top of this work.
CXR-BERT and its associated models should be helpful for exploring various clinical NLP & VLP research questions, especially in the radiology domain. #### Out-of-Scope Use **Any** deployed use case of the model --- commercial or otherwise --- is currently out of scope. Although we evaluated the models using a broad set of publicly-available research benchmarks, the models and evaluations are not intended for deployed use cases. Please refer to [the associated paper](https://arxiv.org/abs/2204.09817) for more details. ## Data This model builds upon existing publicly-available datasets: - [PubMed](https://pubmed.ncbi.nlm.nih.gov/) - [MIMIC-III](https://physionet.org/content/mimiciii/) - [MIMIC-CXR](https://physionet.org/content/mimic-cxr/) These datasets reflect a broad variety of sources ranging from biomedical abstracts to intensive care unit notes to chest X-ray radiology notes. The radiology notes are accompanied with their associated chest x-ray DICOM images in MIMIC-CXR dataset. ## Performance We demonstrate that this language model achieves state-of-the-art results in radiology natural language inference through its improved vocabulary and novel language pretraining objective leveraging semantics and discourse characteristics in radiology reports. A highlight of comparison to other common models, including [ClinicalBERT](https://aka.ms/clinicalbert) and [PubMedBERT](https://aka.ms/pubmedbert): | | RadNLI accuracy (MedNLI transfer) | Mask prediction accuracy | Avg. # tokens after tokenization | Vocabulary size | | ----------------------------------------------- | :-------------------------------: | :----------------------: | :------------------------------: | :-------------: | | RadNLI baseline | 53.30 | - | - | - | | ClinicalBERT | 47.67 | 39.84 | 78.98 (+38.15%) | 28,996 | | PubMedBERT | 57.71 | 35.24 | 63.55 (+11.16%) | 28,895 | | CXR-BERT (after Phase-III) | 60.46 | 77.72 | 58.07 (+1.59%) | 30,522 | | **CXR-BERT (after Phase-III + Joint Training)** | **65.21** | **81.58** | **58.07 (+1.59%)** | 30,522 | CXR-BERT also contributes to better vision-language representation learning through its improved text encoding capability. Below is the zero-shot phrase grounding performance on the **MS-CXR** dataset, which evaluates the quality of image-text latent representations. | Vision–Language Pretraining Method | Text Encoder | MS-CXR Phrase Grounding (Avg. CNR Score) | | ---------------------------------- | ------------ | :--------------------------------------: | | Baseline | ClinicalBERT | 0.769 | | Baseline | PubMedBERT | 0.773 | | ConVIRT | ClinicalBERT | 0.818 | | GLoRIA | ClinicalBERT | 0.930 | | **BioViL** | **CXR-BERT** | **1.027** | | **BioViL-L** | **CXR-BERT** | **1.142** | Additional details about performance can be found in the corresponding paper, [Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing](https://arxiv.org/abs/2204.09817). ## Limitations This model was developed using English corpora, and thus can be considered English-only. ## Further information Please refer to the corresponding paper, ["Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing", ECCV'22](https://arxiv.org/abs/2204.09817) for additional details on the model training and evaluation. For additional inference pipelines with CXR-BERT, please refer to the [HI-ML GitHub](https://aka.ms/biovil-code) repository. The associated source files will soon be accessible through this link.
8,183
[ [ -0.02545166015625, -0.046722412109375, 0.038482666015625, -0.0007028579711914062, -0.0290374755859375, -0.004848480224609375, -0.0113067626953125, -0.0498046875, 0.003681182861328125, 0.0274810791015625, -0.0251007080078125, -0.052886962890625, -0.05999755859375...
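The CXR-BERT-general card above lists masked-language-model widget examples; here is a minimal sketch reproducing them with the stock `fill-mask` pipeline. The hosted widget suggests the checkpoint loads this way, but if the repository ships custom modeling code, `trust_remote_code=True` may be required:

```python
from transformers import pipeline

# Reproduce the card's widget examples with the standard fill-mask pipeline.
fill = pipeline("fill-mask", model="microsoft/BiomedVLP-CXR-BERT-general")

examples = [
    "Left pleural effusion with adjacent [MASK].",
    "Heart size normal and lungs are [MASK].",
]
for text in examples:
    print(text)
    for candidate in fill(text, top_k=3):
        print(f"  {candidate['score']:.3f}  {candidate['token_str']}")
```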
Helsinki-NLP/opus-mt-en-da
2023-08-16T11:29:20.000Z
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "en", "da", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-mt-en-da
4
1,964
transformers
2022-03-02T23:29:04
--- tags: - translation license: apache-2.0 --- ### opus-mt-en-da * source languages: en * target languages: da * OPUS readme: [en-da](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-da/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.zip) * test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.test.txt) * test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba.en.da | 60.4 | 0.745 |
818
[ [ -0.0211181640625, -0.0380859375, 0.019775390625, 0.0284423828125, -0.031341552734375, -0.028961181640625, -0.0305328369140625, -0.006378173828125, 0.00494384765625, 0.0323486328125, -0.049591064453125, -0.043121337890625, -0.047454833984375, 0.01969909667968...
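The opus-mt-en-da card above reports benchmarks but omits a usage snippet; MarianMT checkpoints load with the standard `transformers` translation pipeline. A minimal sketch, with an example sentence of our own choosing:

```python
from transformers import pipeline

# English -> Danish translation with the MarianMT checkpoint.
translate = pipeline("translation", model="Helsinki-NLP/opus-mt-en-da")
result = translate("The weather in Copenhagen is lovely today.")
print(result[0]["translation_text"])
```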
maywell/Synatra-11B-Testbench
2023-10-15T12:34:51.000Z
[ "transformers", "pytorch", "mistral", "text-generation", "ko", "license:cc-by-nc-4.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
maywell
null
null
maywell/Synatra-11B-Testbench
1
1,964
transformers
2023-10-15T11:27:17
--- language: - ko library_name: transformers pipeline_tag: text-generation license: cc-by-nc-4.0 --- # **Synatra-11B-Testbench** Made by StableFluffy **Contact (Do not contact me about personal matters.)** Discord : is.maywell Telegram : AlzarTakkarsen ## License This model is strictly [*non-commercial*](https://creativecommons.org/licenses/by-nc/4.0/) (**cc-by-nc-4.0**) use only, which takes priority over the **MISTRAL APACHE 2.0** license. The "Model" is completely free (i.e. base model, derivatives, merges/mixes) to use for non-commercial purposes, as long as the included **cc-by-nc-4.0** license in any parent repository and the non-commercial use clause remain in place, regardless of other models' licenses. The license may change once a new model is released. If you want to use this model for commercial purposes, contact me. ## Model Details **Base Model** [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) **Trained On** A100 80GB * 4 # **Model Benchmark** None yet. > Readme format: [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) ---
1,116
[ [ -0.03289794921875, -0.052978515625, 0.004924774169921875, 0.032379150390625, -0.0531005859375, -0.02752685546875, -0.002429962158203125, -0.0595703125, 0.0253448486328125, 0.029052734375, -0.058197021484375, -0.032470703125, -0.041046142578125, -0.0029869079...
deepset/gbert-base-germandpr-ctx_encoder
2021-10-21T12:17:10.000Z
[ "transformers", "pytorch", "dpr", "exbert", "de", "dataset:deepset/germandpr", "license:mit", "endpoints_compatible", "region:us" ]
null
deepset
null
null
deepset/gbert-base-germandpr-ctx_encoder
7
1,963
transformers
2022-03-02T23:29:05
--- language: de datasets: - deepset/germandpr license: mit thumbnail: https://thumb.tildacdn.com/tild3433-3637-4830-a533-353833613061/-/resize/720x/-/format/webp/germanquad.jpg tags: - exbert --- ![bert_image](https://thumb.tildacdn.com/tild3433-3637-4830-a533-353833613061/-/resize/720x/-/format/webp/germanquad.jpg) ## Overview **Language model:** gbert-base-germandpr **Language:** German **Training data:** GermanDPR train set (~ 56MB) **Eval data:** GermanDPR test set (~ 6MB) **Infrastructure**: 4x V100 GPU **Published**: Apr 26th, 2021 ## Details - We trained a dense passage retrieval model with two gbert-base models as encoders of questions and passages. - The dataset is GermanDPR, a new, German language dataset, which we hand-annotated and published [online](https://deepset.ai/germanquad). - It comprises 9275 question/answer pairs in the training set and 1025 pairs in the test set. Each pair comes with one positive context and three hard negative contexts. - As the basis of the training data, we used our hand-annotated GermanQuAD dataset as positive samples and generated hard negative samples from the latest German Wikipedia dump (6GB of raw txt files). - The data dump was cleaned with tailored scripts, leading to 2.8 million indexed passages from German Wikipedia. See https://deepset.ai/germanquad for more details and dataset download. ## Hyperparameters ``` batch_size = 40 n_epochs = 20 num_training_steps = 4640 num_warmup_steps = 460 max_seq_len = 32 tokens for question encoder and 300 tokens for passage encoder learning_rate = 1e-6 lr_schedule = LinearWarmup embeds_dropout_prob = 0.1 num_hard_negatives = 2 ``` ## Performance During training, we monitored the in-batch average rank and the loss and evaluated different batch sizes, numbers of epochs, and number of hard negatives on a dev set split from the train set. The dev split contained 1030 question/answer pairs. Even without thorough hyperparameter tuning, we observed quite stable learning. Multiple restarts with different seeds produced quite similar results. Note that the in-batch average rank is influenced by settings for batch size and number of hard negatives. A smaller number of hard negatives makes the task easier. After fixing the hyperparameters, we trained the model on the full GermanDPR train set. We further evaluated the retrieval performance of the trained model on the full German Wikipedia with the GermanDPR test set as labels. To this end, we converted the GermanDPR test set to SQuAD format. The DPR model drastically outperforms the BM25 baseline with regard to recall@k. ![performancetable](https://lh3.google.com/u/0/d/1lX6G0cp4NTx1yUWs74LI0Gcs41sYy_Fb=w2880-h1578-iv1) ## Usage ### In haystack You can load the model in [haystack](https://github.com/deepset-ai/haystack/) as a retriever for doing QA at scale: ```python retriever = DensePassageRetriever( document_store=document_store, query_embedding_model="deepset/gbert-base-germandpr-question_encoder", passage_embedding_model="deepset/gbert-base-germandpr-ctx_encoder" ) ``` ## Authors - Timo Möller: `timo.moeller [at] deepset.ai` - Julian Risch: `julian.risch [at] deepset.ai` - Malte Pietsch: `malte.pietsch [at] deepset.ai` ## About us ![deepset logo](https://workablehr.s3.amazonaws.com/uploads/account/logo/476306/logo) We bring NLP to the industry via open source! Our focus: Industry specific language models & large scale QA systems.
Some of our work: - [German BERT (aka "bert-base-german-cased")](https://deepset.ai/german-bert) - [GermanQuAD and GermanDPR datasets and models (aka "gelectra-base-germanquad", "gbert-base-germandpr")](https://deepset.ai/germanquad) - [FARM](https://github.com/deepset-ai/FARM) - [Haystack](https://github.com/deepset-ai/haystack/) Get in touch: [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Website](https://deepset.ai) By the way: [we're hiring!](http://www.deepset.ai/jobs)
4,001
[ [ -0.03619384765625, -0.06414794921875, 0.0250091552734375, 0.003551483154296875, -0.00768280029296875, -0.019927978515625, -0.03399658203125, -0.0261688232421875, -0.00737762451171875, 0.0255584716796875, -0.0284576416015625, -0.04632568359375, -0.030075073242187...
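The gbert-base-germandpr-ctx_encoder card above shows only the haystack retriever; since the checkpoint is a DPR context encoder, it should also load with the plain `transformers` DPR classes. A minimal sketch under that assumption, with hypothetical German passages (`max_length=300` mirrors the card's passage-encoder setting):

```python
import torch
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast

model_id = "deepset/gbert-base-germandpr-ctx_encoder"
tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(model_id)
encoder = DPRContextEncoder.from_pretrained(model_id).eval()

passages = [
    "Berlin ist die Hauptstadt von Deutschland.",
    "Der Rhein ist ein Fluss in Westeuropa.",
]
inputs = tokenizer(passages, padding=True, truncation=True, max_length=300, return_tensors="pt")

with torch.no_grad():
    # One dense vector per passage; pair with the question encoder for retrieval.
    embeddings = encoder(**inputs).pooler_output

print(embeddings.shape)  # e.g. torch.Size([2, 768])
```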
vitouphy/wav2vec2-xls-r-300m-timit-phoneme
2023-05-13T17:04:31.000Z
[ "transformers", "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "en", "generated_from_trainer", "doi:10.57967/hf/0125", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
vitouphy
null
null
vitouphy/wav2vec2-xls-r-300m-timit-phoneme
8
1,963
transformers
2022-05-08T06:41:55
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - pytorch - transformers - en - generated_from_trainer model-index: - name: wav2vec2-xls-r-300m-phoneme results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: DARPA TIMIT type: timit args: en metrics: - name: Test CER type: cer value: 7.996 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> ## Model This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the Timit dataset. Check [this notebook](https://www.kaggle.com/code/vitouphy/phoneme-recognition-with-wav2vec2) for training details. ## Usage **Approach 1:** HuggingFace's pipeline covers everything end-to-end, from raw audio input to text output. ```python from transformers import pipeline # Load the model pipe = pipeline(model="vitouphy/wav2vec2-xls-r-300m-timit-phoneme") # Process raw audio output = pipe("audio_file.wav", chunk_length_s=10, stride_length_s=(4, 2)) ``` **Approach 2:** A more customizable way to predict phonemes. ```python from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC import torch import soundfile as sf # load model and processor processor = Wav2Vec2Processor.from_pretrained("vitouphy/wav2vec2-xls-r-300m-timit-phoneme") model = Wav2Vec2ForCTC.from_pretrained("vitouphy/wav2vec2-xls-r-300m-timit-phoneme") # Read and process the input audio_input, sample_rate = sf.read("audio_file.wav") inputs = processor(audio_input, sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits # Decode ids into strings predicted_ids = torch.argmax(logits, dim=-1) predicted_sentences = processor.batch_decode(predicted_ids) print(predicted_sentences) ``` ## Training and evaluation data We use the [DARPA TIMIT dataset](https://www.kaggle.com/datasets/mfekadu/darpa-timit-acousticphonetic-continuous-speech) for this model. - We split into **80/10/10** for training, validation, and testing respectively. - That roughly corresponds to about **137/17/17** minutes. - The model obtained a CER of **7.996%** on this test set. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - training_steps: 10000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0 ### Citation ``` @misc { phy22-phoneme, author = {Phy, Vitou}, title = {{Automatic Phoneme Recognition on TIMIT Dataset with Wav2Vec 2.0}}, year = 2022, note = {{If you use this model, please cite it using these metadata.}}, publisher = {Hugging Face}, version = {1.0}, doi = {10.57967/hf/0125}, url = {https://huggingface.co/vitouphy/wav2vec2-xls-r-300m-timit-phoneme} } ```
3,444
[ [ -0.0215911865234375, -0.043426513671875, 0.0087738037109375, 0.0164031982421875, -0.018829345703125, -0.026031494140625, -0.0296630859375, -0.04217529296875, 0.01346588134765625, 0.0226898193359375, -0.0469970703125, -0.048797607421875, -0.039154052734375, -...
heegyu/AULM-5.8b-v0804-hf
2023-08-07T12:41:55.000Z
[ "transformers", "pytorch", "gpt_neox", "text-generation", "polyglot-ko-5.8b", "ko", "dataset:changpt/ko-lima-vicuna", "dataset:HAERAE-HUB/KoInstruct-QA", "dataset:nlpai-lab/kullm-v2", "dataset:heegyu/korquad-chat-v1", "dataset:KETI-AIR/kowow", "dataset:NomaDamas/Ko-StrategyQA", "license:apac...
text-generation
heegyu
null
null
heegyu/AULM-5.8b-v0804-hf
2
1,963
transformers
2023-08-07T11:21:14
--- license: apache-2.0 language: - ko datasets: - changpt/ko-lima-vicuna - HAERAE-HUB/KoInstruct-QA - nlpai-lab/kullm-v2 - heegyu/korquad-chat-v1 - KETI-AIR/kowow - NomaDamas/Ko-StrategyQA tags: - polyglot-ko-5.8b --- [EleutherAI/polyglot-ko-5.8b](https://huggingface.co/EleutherAI/polyglot-ko-5.8b) was trained for 3 epochs on publicly released Korean instruction data. ### Datasets used - [evol-instruct](https://github.com/lcw99/evolve-instruct/) - [koalpaca v1.1](https://raw.githubusercontent.com/Beomi/KoAlpaca/main/KoAlpaca_v1.1.jsonl) - [kullm v2](https://huggingface.co/datasets/nlpai-lab/kullm-v2) - only the dolly and gpt4all subsets - [changpt/ko-lima-vicuna](https://huggingface.co/datasets/changpt/ko-lima-vicuna) - [HAERAE-HUB/KoInstruct-QA](https://huggingface.co/datasets/HAERAE-HUB/KoInstruct-QA) - [heegyu/korquad-chat-v1](https://huggingface.co/datasets/heegyu/korquad-chat-v1) - [AIRC-KETI/kowow](https://github.com/AIRC-KETI/kowow) - [NomaDamas/Ko-StrategyQA](https://huggingface.co/datasets/PCEO-AI-CLUB/Ko-StrategyQA) Data loading & merging code: https://github.com/HeegyuKim/open-korean-instructions/blob/main/parse.py ### Base prompt News source: https://n.news.naver.com/mnews/article/437/0000354456?sid=101 ``` ### Human: 글이 너무 길어요, 내용을 요약해주세요 ### Input: [앵커] 이렇게 더울 때 아이스크림 많이 찾게 되는데, 가격표 보면 손이 쉽게 가지 않습니다. 1년 전보다 10% 넘게 올랐습니다. 정부 요청으로 라면이나 과자 가격은 내려갔는데, 아이스크림은 꿈쩍 않고 있습니다. 이상화 기자가 취재했습니다. [기자] 찜통 더위에 아이스크림 코너를 찾았다가 부쩍 오른 가격에 놀랐다는 소비자들이 적지 않습니다. [조승건/서울 상암동 : 좀 합리적으로 올렸으면 좋겠어요. 올리자 해서 올려버리고…] [이영빈/서울 목동 : (아이스크림) 콘 종류가 가격이 많이 오른 것 같아서 편의점에서 사기가 좀 부담스러워지는…] 지난달 기준으로 아이스크림 값은 1년전에 비해 10.7% 올랐습니다. 평균 물가상승률보다 5배 가량 높습니다. 올 들어 1000원짜리 아이스크림은 1200원으로 올랐습니다. 그런데 지난달 한 대형 아이스크림 업체가 또다시 편의점 공급가격을 올리기로 했습니다. 정부가 인하 요청을 한 뒤 몇몇 라면과 빵, 과자 가격이 내려간 것과 반대 흐름입니다. [아이스크림 업체 관계자 : 원가 부담이 되기 때문에 가격 인하를 하지 않은 거고요.] 편의점들은 일단 인상분을 감수하고 소비자가격을 안 올리기로 했지만, 조만간 반영할 것으로 보입니다. 10월부턴 우윳값이 오르기 때문에 우유를 원료로 쓰는 아이스크림은 더 오를 수 있습니다. 여기에 다른 가공식품과 외식가격까지 뛰다 보니 소비자들은 2%대로 둔화된 물가를 체감하기 어렵습니다. [허은미/경기 고양시 덕은동 : 저는 아이가 있으니까 라면이나 음료수나 우유, 고기 위주로 사게 되는데 가격 차이를 딱히 못 느끼겠어요.] 실제 장기 물가 흐름을 가늠하기 위해 변동폭이 큰 농산물과 석유류를 빼고 매기는 '근원물가'는 올 들어 4.5% 올랐습니다. 외환위기가 있던 1998년 6.8%를 기록한 이후 25년 만에 가장 높은 수치인데, 가공식품과 외식비, 공공요금 등이 오른 영향입니다. (영상디자인 : 이정회) ``` Generated output: ```이 기사에서는 뜨거운 날씨에 아이스크림이 많이 찾는 여름에 아이스크림 값이 인상된 것에 대해 소비자들의 불만이 높으며 아이스크림 가격이 비싼 원인과 가격 결정의 문제를 분석하였습니다. 또한, 다른 가공식품과 외식 가격 상승, 인플레이션으로 인한 체감 물가 상승 등에 대해 설명하였고, 정부가 가공식품과외식 가격인하 요청 이후 몇몇 라면과 빵, 과자 가격이 인하된 것과 대조적으로 아이스크림 가격이 지속적으로 인상된 것에 대해 언급하고 있습니다.``` ### Another example prompt ``` 당신은 한국어 챗봇 아우름입니다. 다음과 같은 조건을 만족하여 사용자에게 친절한 대답을 해야합니다. 1. 자연스러운 대화: 사용자와 자연스럽고 유창한 대화를 나누도록 노력해주세요. 2. 다양한 주제에 대한 지식: 다양한 분야의 지식을 포함하여 사용자의 질문에 정확하고 포괄적으로 답변할 수 있도록 학습시켜주세요. 3. 정확한 정보 제공: 사용자가 요청한 정보에 대해 정확하고 신뢰할 수 있는 답변을 제공하도록 해주세요. 4. 사용자 의도 파악 능력: 사용자의 의도와 요구를 이해하고 그에 맞게 대응할 수 있는 능력을 갖추도록 개발해주세요. 5. 적절한 감정 인식: 사용자의 감정을 파악하고 적절한 감정 표현과 대화 방식을 선택하여 상호작용할 수 있도록 지도해주세요. ### Human: 안녕! 반가워요, ### Assistant: 안녕하세요. 저는 아주대학교의 한국어 챗봇 아우름입니다. 당신이 궁금하거나, 필요로 하는 정보에 대해서 최선을 다해 답변해드릴게요.<|endoftext|> ### Human: 만약 내일 지구가 멸망한다면 어떡하지?? ### Assistant: ``` Generated output: ```안녕하세요! 아주대학교의 한국어 챗봇 아우름입니다. 만약 내일 지구가 멸망한다면, 여러 가지 고민이 생기겠지만, 가장 먼저 해야할 일은 최선의 선택을 해야합니다. 가장 합리적인 선택은, 지구가 멸망하기 전에 사랑하는 사람들과 함께하는 시간을 보내거나, 더 많이 웃고, 즐거운 활동을 하며, 다음 세상을 위한 준비를 하는 것이라 생각됩니다. 여러분의 시간을 더 소중하게 사용하시기 바랍니다.```
3,253
[ [ -0.04827880859375, -0.04278564453125, 0.018707275390625, 0.02105712890625, -0.027191162109375, 0.01093292236328125, 0.0250396728515625, -0.02587890625, 0.054718017578125, 0.023956298828125, -0.021942138671875, -0.027801513671875, -0.0487060546875, 0.00789642...
CiaraRowles/TemporalNet2
2023-06-17T23:21:50.000Z
[ "diffusers", "controlnet", "stable-diffusion", "license:openrail", "diffusers:ControlNetModel", "region:us" ]
null
CiaraRowles
null
null
CiaraRowles/TemporalNet2
101
1,962
diffusers
2023-06-07T21:23:21
--- license: openrail tags: - controlnet - stable-diffusion - diffusers base_model: runwayml/stable-diffusion-v1-5 --- Introducing TemporalNet2. TemporalNet was a ControlNet model designed to enhance the temporal consistency of generated outputs. TemporalNet2 is an evolution of the concept, where the generated outputs are guided by both the last frame *and* an optical flow map between the frames, improving generation consistency. This required some modification of the original ControlNet code, so you'll have to do a few extra things. If you just want to run a gradio example or look at the modified controlnet code, that's here: https://github.com/CiaraStrawberry/TemporalNet Just drop the model from this directory into that model folder and make sure the gradio_temporalnet.py script points at the model. To use with stable diffusion, you can either use it with TemporalKit by moving to the branch here after following steps 1 and 2: https://github.com/CiaraStrawberry/TemporalKit/tree/TemporalNet , or use it just by accessing the base API through the temporalvideo.py script: 1) Move your ControlNet webui install to this branch: https://github.com/CiaraStrawberry/sd-webui-controlnet-TemporalNet-API 2) Add the model to your models folder in the ControlNet extension in Automatic1111's Web UI. 3) Check you have: - A folder named "Input_Images" with the input frames - A PNG file called "init.png" that is pre-stylized in your desired style - The "temporalvideo.py" script 4) Customize the "temporalvideo.py" script according to your preferences, such as the image resolution, prompt, and control net settings. 5) Launch Automatic1111's Web UI with the --api setting enabled. 6) Execute the Python script. *Please note that the "init.png" image will not significantly influence the style of the output video. Its primary purpose is to prevent a drastic change in aesthetics during the first few frames.* Also, I highly recommend you use this in conjunction with the HED model; the settings are already in the script. ToDo: Write an extension for the web UI. Write a feature that automatically generates an "init.png" image if none is provided. ̶C̶h̶a̶n̶g̶e̶ ̶t̶h̶e̶ ̶e̶x̶t̶e̶n̶s̶i̶o̶n̶ ̶t̶o̶ ̶.̶s̶a̶f̶e̶t̶e̶n̶s̶o̶r̶s̶ ̶a̶n̶d̶ ̶i̶n̶v̶e̶s̶t̶i̶g̶a̶t̶e̶ ̶c̶o̶m̶p̶r̶e̶s̶s̶i̶o̶n̶.̶
2,300
[ [ -0.03277587890625, -0.036041259765625, 0.0110015869140625, 0.023193359375, -0.01256561279296875, -0.006816864013671875, 0.0125579833984375, -0.0294952392578125, 0.0182647705078125, 0.020233154296875, -0.051727294921875, -0.00525665283203125, -0.056488037109375, ...
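The TemporalNet2 card above says generation is guided by the last frame plus an optical-flow map between frames; the exact flow encoding the model expects is defined in the linked repo. As an illustration of the flow-map idea only, here is a common OpenCV recipe (Farneback flow rendered as an HSV image); it is a stand-in, not necessarily the repo's preprocessing, and the frame file names are hypothetical:

```python
import cv2
import numpy as np

# Dense optical flow between two consecutive frames.
prev = cv2.cvtColor(cv2.imread("frame_000.png"), cv2.COLOR_BGR2GRAY)
curr = cv2.cvtColor(cv2.imread("frame_001.png"), cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)

# Standard visualization: flow direction as hue, magnitude as value.
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv = np.zeros((*prev.shape, 3), dtype=np.uint8)
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite("flow_map.png", cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR))
```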
ramy21/braintumormodel5
2023-10-07T12:42:42.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
ramy21
null
null
ramy21/braintumormodel5
1
1,961
diffusers
2023-10-07T12:37:20
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### braintumormodel5 Dreambooth model trained by ramy21 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
504
[ [ -0.0262603759765625, -0.048309326171875, 0.04901123046875, 0.0357666015625, -0.012969970703125, 0.0146026611328125, 0.0191497802734375, -0.0178070068359375, 0.035858154296875, 0.015777587890625, -0.020721435546875, -0.0253143310546875, -0.045379638671875, -0...
ItsJayQz/Firewatch_Diffusion
2023-01-28T01:05:08.000Z
[ "diffusers", "safetensors", "stable-diffusion", "text-to-image", "en", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
ItsJayQz
null
null
ItsJayQz/Firewatch_Diffusion
21
1,958
diffusers
2022-12-08T03:41:05
--- language: - en license: creativeml-openrail-m tags: - stable-diffusion - text-to-image - diffusers inference: true --- ### Firewatch Diffusion This model was trained on in-game footage and some game posters by Olly Moss. The model can make good-looking nature scenery similar to the game's art style, along with some objects. It cannot make portraits at all, since throughout the game you don't actually meet any characters. To reference the art style, use the token: fwatch style ### Gradio We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Firewatch_Diffusion: [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/ItsJayQz/Firewatch_Diffusion) Here are some samples. **Sceneries** ![fwatch.png](https://s3.amazonaws.com/moonup/production/uploads/1670473030496-635eafb49f24f6db0a1eafd1.png) ![fwatch2.png](https://s3.amazonaws.com/moonup/production/uploads/1670473031879-635eafb49f24f6db0a1eafd1.png) Prompt used: Beautiful *mountain/lake/beach/etc* in fwatch style Guidance: 7 Steps: 65 using DDIM. I'm not a prompt wizard, so you can definitely get better results with some tuning. **Objects** ![fwatch3.png](https://s3.amazonaws.com/moonup/production/uploads/1670473031927-635eafb49f24f6db0a1eafd1.png) **Disclaimers** - I'm in no way affiliated with Campo Santo, or any entities relating to the ownership of the game artworks. - The phrase Firewatch is simply a reference for accessibility. - This was created entirely for research and entertainment purposes. - I have no plans to turn this model into a commercial product, or to use it for commercial purposes. - I do not condone the usage of the model for making counterfeit products that might infringe on Campo Santo/Olly Moss's copyrights/trademarks. **License** - This model is under Creative OpenRAIL-M. - This means the model can be used royalty-free, with flexible usage terms, such as redistribution of the model or of any derivatives of it. - However, there are restrictions on the openness of the license. More info on the restrictions can be found [here](https://huggingface.co/spaces/CompVis/stable-diffusion-license). **Responsibilities** - By using/downloading the model, you are responsible for: - All outputs/usage of the model. - Understanding the Disclaimers. - Upholding the terms of the license. Thanks for checking out the model!
2,611
[ [ -0.0211639404296875, -0.05377197265625, 0.0452880859375, 0.032440185546875, -0.0207977294921875, -0.0119171142578125, 0.032745361328125, -0.044586181640625, 0.015106201171875, 0.0489501953125, -0.06378173828125, -0.0277252197265625, -0.0294952392578125, -0.0...
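The Firewatch Diffusion card above quotes guidance 7 and 65 DDIM steps; here is a minimal `diffusers` sketch using those settings. Swapping in `DDIMScheduler`, fp16, CUDA, and the exact prompt wording are our choices, not the card's:

```python
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "ItsJayQz/Firewatch_Diffusion", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)  # match the card's DDIM sampling
pipe = pipe.to("cuda")

image = pipe(
    "Beautiful mountain lake at sunset in fwatch style",  # "fwatch style" is the card's token
    guidance_scale=7,
    num_inference_steps=65,
).images[0]
image.save("firewatch_lake.png")
```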
Yntec/MeinaAlter
2023-10-25T13:00:15.000Z
[ "diffusers", "Anime", "Landscapes", "Female", "Meina", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
Yntec
null
null
Yntec/MeinaAlter
1
1,958
diffusers
2023-10-25T11:36:37
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image tags: - Anime - Landscapes - Female - Meina - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image --- # Meina Alter Original model page: https://civitai.com/models/20945?modelVersionId=24933 Sample and prompt: ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/-kJYweRUED7V5ubK44mk2.png) summer vibrancy, Portrait of Cartoon pretty CUTE girl waiting for rain, anime fantasy illustration by tomoyuki yamasaki, DETAILED CHIBI EYES, kyoto studio, madhouse, trending on ufotable. artstation visible brushstrokes art By ROSSDRAWS and Ellen paige and charles sillem lidderdale and hayao miyazaki
741
[ [ -0.041839599609375, -0.04180908203125, 0.0272369384765625, 0.0406494140625, -0.0249481201171875, -0.0250396728515625, 0.0255126953125, -0.055450439453125, 0.05743408203125, 0.053375244140625, -0.07427978515625, -0.0287017822265625, -0.042816162109375, 0.0007...
heegyu/42dot_LLM-PLM-1.3B-mt
2023-10-19T04:40:58.000Z
[ "transformers", "pytorch", "llama", "text-generation", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
heegyu
null
null
heegyu/42dot_LLM-PLM-1.3B-mt
0
1,952
transformers
2023-10-03T00:13:19
# WIP - Still a work in progress.. the model has some issues ㅠ original model: [42dot/42dot_LLM-PLM-1.3B](https://huggingface.co/42dot/42dot_LLM-PLM-1.3B) ## Usage example ``` from transformers import AutoModelForCausalLM, AutoTokenizer import torch model_id = "heegyu/42dot_LLM-PLM-1.3B-mt" model = AutoModelForCausalLM.from_pretrained(model_id).eval().half() tokenizer = AutoTokenizer.from_pretrained(model_id) if torch.cuda.is_available(): device = "cuda:0" model.to(device) else: device = "cpu" @torch.no_grad() def generate_text(prompt): input_ids = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False).to(device) output_ids = model.generate(input_ids, min_new_tokens=4, max_length=1024, early_stopping=True) output_ids = output_ids.cpu()[0][len(input_ids[0]):] print(tokenizer.decode(output_ids)) bos, eos = tokenizer.bos_token, tokenizer.eos_token # Korean -> English text = "삼성전자가 갤럭시 스마트폰·태블릿 전용 ‘클라우드 게임 플랫폼’을 이르면 이달 공개한다. 전 세계 10억 명의 갤럭시 사용자가 콘솔(게임기)을 구매하거나 게임 앱을 내려받지 않아도 스마트폰을 통해 실시간으로 유명 게임을 즐길 수 있게 되는 것이다. 기기 판매에 의존하지 않고 안정적인 서비스 수익을 올리려는 삼성전자의 ‘신사업 승부수’란 평가가 나온다." generate_text(f"{bos} {text} {eos} ") # ㈜Samsung Electronics will release the Cloud Game Platform for Galaxy smartphones and tablets in early this month, allowing users of 1 billion people around the world to enjoy famous games on their smartphones in real time without buying consoles or downloading game apps. It is said to be a 'business move' by Samsung Electronics, which is trying to earn stable service revenue without relying on sales.<|endoftext|> # English -> Korean; the last sentence got cut off text = """Samsung Electronics will unveil a "cloud game platform" exclusively for Galaxy smartphones and tablets as early as this month. One billion Galaxy users around the world will be able to enjoy famous games in real time through smartphones without having to purchase consoles or download game apps. Analysts say that Samsung Electronics is a "new business winning move" to earn stable service profits without relying on device sales.""" generate_text(f"{bos} {text} {eos} ") # NC는 이달 중 갤럭시 스마트폰과 태블릿 전용 '클라우드 게임 플랫폼'을 독점 공개할 예정인데, 전 세계 1억명의 갤럭시 사용자들은 콘솔이나 게임 앱 다운로드 없이 스마트폰을 통해 유명 게임을 실시간으로 즐길 수 있게 됐다.<|endoftext|> # English -> Korean; you can pin the translation of particular terms by prepending them text = """Samsung Electronics will unveil a "cloud game platform" exclusively for Galaxy smartphones and tablets as early as this month.""" generate_text(f"{bos} Samsung Electronics {eos} 삼성전자 {eos} {bos} {text} {eos} ") # N가전 삼성전자가 갤럭시 스마트폰과 태블릿 전용 '클라우드 게임 플랫폼'을 이달 중으로 공개한다.<|endoftext|> ``` ## Model evaluation ``` python main.py \ --model hf-causal \ --model_args pretrained=heegyu/42dot_LLM-PLM-1.3B-mt \ --tasks kobest_hellaswag,kobest_copa,kobest_boolq,kobest_sentineg \ --device cuda:0 ``` - boolq, copa, and hellaswag dropped compared to the original model. - sentineg improved significantly hf-causal (pretrained=heegyu/42dot_LLM-PLM-1.3B-mt), limit: None, provide_description: False, num_fewshot: 0, batch_size: None | Task |Version| Metric |Value | |Stderr| |----------------|------:|--------|-----:|---|-----:| |kobest_boolq | 0|acc |0.5021|± |0.0133| | | |macro_f1|0.3343|± |0.0059| |kobest_copa | 0|acc |0.6640|± |0.0149| | | |macro_f1|0.6633|± |0.0149| |kobest_hellaswag| 0|acc |0.4020|± |0.0219| | | |acc_norm|0.5220|± |0.0224| | | |macro_f1|0.3974|± |0.0218| |kobest_sentineg | 0|acc |0.8010|± |0.0201| | | |macro_f1|0.8003|± |0.0201|
3,493
[ [ -0.031585693359375, -0.056365966796875, 0.0245361328125, 0.0347900390625, -0.0401611328125, -0.00466156005859375, 0.0031604766845703125, -0.0274200439453125, 0.0360107421875, 0.03240966796875, -0.069091796875, -0.029937744140625, -0.0367431640625, -0.0064353...
moussaKam/AraBART
2022-05-05T13:17:29.000Z
[ "transformers", "pytorch", "mbart", "feature-extraction", "summarization", "bart", "fill-mask", "ar", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
moussaKam
null
null
moussaKam/AraBART
10
1,950
transformers
2022-03-09T10:05:16
--- tags: - summarization - bart language: - ar widget: - text: بيروت هي عاصمة <mask>. license: apache-2.0 pipeline_tag: "fill-mask" --- AraBART is the first Arabic model in which the encoder and the decoder are pretrained end-to-end, based on BART. AraBART follows the architecture of BART-Base, which has 6 encoder and 6 decoder layers and 768 hidden dimensions. In total, AraBART has 139M parameters. AraBART achieves the best performance on multiple abstractive summarization datasets, outperforming strong baselines including pretrained Arabic BERT-based models and the multilingual mBART and mT5 models.
613
[ [ -0.06243896484375, -0.03802490234375, 0.0202178955078125, 0.007015228271484375, -0.0263671875, 0.03521728515625, 0.01068878173828125, -0.0350341796875, 0.01837158203125, 0.0199432373046875, -0.0132293701171875, -0.056915283203125, -0.07061767578125, 0.007583...
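The AraBART card above includes a fill-mask widget example; here is a minimal sketch reproducing it with the `fill-mask` pipeline. The hosted widget suggests the BART-style checkpoint is served this way, though seq2seq mask filling can behave differently from encoder-only models:

```python
from transformers import pipeline

# Reproduce the card's widget example; <mask> is the BART-style mask token.
fill = pipeline("fill-mask", model="moussaKam/AraBART")
for candidate in fill("بيروت هي عاصمة <mask>.", top_k=3):
    print(f"{candidate['score']:.3f}  {candidate['token_str']}")
```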
Muennighoff/SGPT-5.8B-weightedmean-msmarco-specb-bitfit
2023-03-27T22:26:36.000Z
[ "sentence-transformers", "pytorch", "gptj", "feature-extraction", "sentence-similarity", "mteb", "arxiv:2202.08904", "model-index", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
Muennighoff
null
null
Muennighoff/SGPT-5.8B-weightedmean-msmarco-specb-bitfit
19
1,948
sentence-transformers
2022-03-02T23:29:04
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb model-index: - name: SGPT-5.8B-weightedmean-msmarco-specb-bitfit results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: 2d8a100785abf0ae21420d2a55b0c56e3e1ea996 metrics: - type: accuracy value: 69.22388059701493 - type: ap value: 32.04724673950256 - type: f1 value: 63.25719825770428 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: 80714f8dcf8cefc218ef4f8c5a966dd83f75a0e1 metrics: - type: accuracy value: 71.26109999999998 - type: ap value: 66.16336378255403 - type: f1 value: 70.89719145825303 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: c379a6705fec24a2493fa68e011692605f44e119 metrics: - type: accuracy value: 39.19199999999999 - type: f1 value: 38.580766731113826 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: 5b3e3697907184a9b77a3c99ee9ea1a9cbb1e4e3 metrics: - type: map_at_1 value: 27.311999999999998 - type: map_at_10 value: 42.620000000000005 - type: map_at_100 value: 43.707 - type: map_at_1000 value: 43.714999999999996 - type: map_at_3 value: 37.624 - type: map_at_5 value: 40.498 - type: mrr_at_1 value: 27.667 - type: mrr_at_10 value: 42.737 - type: mrr_at_100 value: 43.823 - type: mrr_at_1000 value: 43.830999999999996 - type: mrr_at_3 value: 37.743 - type: mrr_at_5 value: 40.616 - type: ndcg_at_1 value: 27.311999999999998 - type: ndcg_at_10 value: 51.37500000000001 - type: ndcg_at_100 value: 55.778000000000006 - type: ndcg_at_1000 value: 55.96600000000001 - type: ndcg_at_3 value: 41.087 - type: ndcg_at_5 value: 46.269 - type: precision_at_1 value: 27.311999999999998 - type: precision_at_10 value: 7.945 - type: precision_at_100 value: 0.9820000000000001 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 17.046 - type: precision_at_5 value: 12.745000000000001 - type: recall_at_1 value: 27.311999999999998 - type: recall_at_10 value: 79.445 - type: recall_at_100 value: 98.151 - type: recall_at_1000 value: 99.57300000000001 - type: recall_at_3 value: 51.13799999999999 - type: recall_at_5 value: 63.727000000000004 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: 0bbdb47bcbe3a90093699aefeed338a0f28a7ee8 metrics: - type: v_measure value: 45.59037428592033 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: b73bd54100e5abfa6e3a23dcafb46fe4d2438dc3 metrics: - type: v_measure value: 38.86371701986363 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 4d853f94cd57d85ec13805aeeac3ae3e5eb4c49c metrics: - type: map value: 61.625568691427766 - type: mrr value: 75.83256386580486 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: 9ee918f184421b6bd48b78f6c714d86546106103 metrics: - type: cos_sim_pearson value: 89.96074355094802 - type: cos_sim_spearman value: 86.2501580394454 - type: euclidean_pearson value: 82.18427440380462 - type: euclidean_spearman value: 80.14760935017947 - type: manhattan_pearson 
value: 82.24621578156392 - type: manhattan_spearman value: 80.00363016590163 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 44fa15921b4c889113cc5df03dd4901b49161ab7 metrics: - type: accuracy value: 84.49350649350649 - type: f1 value: 84.4249343233736 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 11d0121201d1f1f280e8cc8f3d98fb9c4d9f9c55 metrics: - type: v_measure value: 36.551459722989385 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: c0fab014e1bcb8d3a5e31b2088972a1e01547dc1 metrics: - type: v_measure value: 33.69901851846774 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 30.499 - type: map_at_10 value: 41.208 - type: map_at_100 value: 42.638 - type: map_at_1000 value: 42.754 - type: map_at_3 value: 37.506 - type: map_at_5 value: 39.422000000000004 - type: mrr_at_1 value: 37.339 - type: mrr_at_10 value: 47.051 - type: mrr_at_100 value: 47.745 - type: mrr_at_1000 value: 47.786 - type: mrr_at_3 value: 44.086999999999996 - type: mrr_at_5 value: 45.711 - type: ndcg_at_1 value: 37.339 - type: ndcg_at_10 value: 47.666 - type: ndcg_at_100 value: 52.994 - type: ndcg_at_1000 value: 54.928999999999995 - type: ndcg_at_3 value: 41.982 - type: ndcg_at_5 value: 44.42 - type: precision_at_1 value: 37.339 - type: precision_at_10 value: 9.127 - type: precision_at_100 value: 1.4749999999999999 - type: precision_at_1000 value: 0.194 - type: precision_at_3 value: 20.076 - type: precision_at_5 value: 14.449000000000002 - type: recall_at_1 value: 30.499 - type: recall_at_10 value: 60.328 - type: recall_at_100 value: 82.57900000000001 - type: recall_at_1000 value: 95.074 - type: recall_at_3 value: 44.17 - type: recall_at_5 value: 50.94 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 30.613 - type: map_at_10 value: 40.781 - type: map_at_100 value: 42.018 - type: map_at_1000 value: 42.132999999999996 - type: map_at_3 value: 37.816 - type: map_at_5 value: 39.389 - type: mrr_at_1 value: 38.408 - type: mrr_at_10 value: 46.631 - type: mrr_at_100 value: 47.332 - type: mrr_at_1000 value: 47.368 - type: mrr_at_3 value: 44.384 - type: mrr_at_5 value: 45.661 - type: ndcg_at_1 value: 38.408 - type: ndcg_at_10 value: 46.379999999999995 - type: ndcg_at_100 value: 50.81 - type: ndcg_at_1000 value: 52.663000000000004 - type: ndcg_at_3 value: 42.18 - type: ndcg_at_5 value: 43.974000000000004 - type: precision_at_1 value: 38.408 - type: precision_at_10 value: 8.656 - type: precision_at_100 value: 1.3860000000000001 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 20.276 - type: precision_at_5 value: 14.241999999999999 - type: recall_at_1 value: 30.613 - type: recall_at_10 value: 56.44 - type: recall_at_100 value: 75.044 - type: recall_at_1000 value: 86.426 - type: recall_at_3 value: 43.766 - type: recall_at_5 value: 48.998000000000005 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 
value: 37.370999999999995 - type: map_at_10 value: 49.718 - type: map_at_100 value: 50.737 - type: map_at_1000 value: 50.79 - type: map_at_3 value: 46.231 - type: map_at_5 value: 48.329 - type: mrr_at_1 value: 42.884 - type: mrr_at_10 value: 53.176 - type: mrr_at_100 value: 53.81700000000001 - type: mrr_at_1000 value: 53.845 - type: mrr_at_3 value: 50.199000000000005 - type: mrr_at_5 value: 52.129999999999995 - type: ndcg_at_1 value: 42.884 - type: ndcg_at_10 value: 55.826 - type: ndcg_at_100 value: 59.93000000000001 - type: ndcg_at_1000 value: 61.013 - type: ndcg_at_3 value: 49.764 - type: ndcg_at_5 value: 53.025999999999996 - type: precision_at_1 value: 42.884 - type: precision_at_10 value: 9.046999999999999 - type: precision_at_100 value: 1.212 - type: precision_at_1000 value: 0.135 - type: precision_at_3 value: 22.131999999999998 - type: precision_at_5 value: 15.524 - type: recall_at_1 value: 37.370999999999995 - type: recall_at_10 value: 70.482 - type: recall_at_100 value: 88.425 - type: recall_at_1000 value: 96.03399999999999 - type: recall_at_3 value: 54.43 - type: recall_at_5 value: 62.327999999999996 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 22.875999999999998 - type: map_at_10 value: 31.715 - type: map_at_100 value: 32.847 - type: map_at_1000 value: 32.922000000000004 - type: map_at_3 value: 29.049999999999997 - type: map_at_5 value: 30.396 - type: mrr_at_1 value: 24.52 - type: mrr_at_10 value: 33.497 - type: mrr_at_100 value: 34.455000000000005 - type: mrr_at_1000 value: 34.510000000000005 - type: mrr_at_3 value: 30.791 - type: mrr_at_5 value: 32.175 - type: ndcg_at_1 value: 24.52 - type: ndcg_at_10 value: 36.95 - type: ndcg_at_100 value: 42.238 - type: ndcg_at_1000 value: 44.147999999999996 - type: ndcg_at_3 value: 31.435000000000002 - type: ndcg_at_5 value: 33.839000000000006 - type: precision_at_1 value: 24.52 - type: precision_at_10 value: 5.9319999999999995 - type: precision_at_100 value: 0.901 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 13.446 - type: precision_at_5 value: 9.469 - type: recall_at_1 value: 22.875999999999998 - type: recall_at_10 value: 51.38 - type: recall_at_100 value: 75.31099999999999 - type: recall_at_1000 value: 89.718 - type: recall_at_3 value: 36.26 - type: recall_at_5 value: 42.248999999999995 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 14.984 - type: map_at_10 value: 23.457 - type: map_at_100 value: 24.723 - type: map_at_1000 value: 24.846 - type: map_at_3 value: 20.873 - type: map_at_5 value: 22.357 - type: mrr_at_1 value: 18.159 - type: mrr_at_10 value: 27.431 - type: mrr_at_100 value: 28.449 - type: mrr_at_1000 value: 28.52 - type: mrr_at_3 value: 24.979000000000003 - type: mrr_at_5 value: 26.447 - type: ndcg_at_1 value: 18.159 - type: ndcg_at_10 value: 28.627999999999997 - type: ndcg_at_100 value: 34.741 - type: ndcg_at_1000 value: 37.516 - type: ndcg_at_3 value: 23.902 - type: ndcg_at_5 value: 26.294 - type: precision_at_1 value: 18.159 - type: precision_at_10 value: 5.485 - type: precision_at_100 value: 0.985 - type: precision_at_1000 value: 0.136 - type: precision_at_3 value: 11.774 - type: precision_at_5 value: 8.731 - type: recall_at_1 value: 14.984 - type: recall_at_10 value: 40.198 - type: 
recall_at_100 value: 67.11500000000001 - type: recall_at_1000 value: 86.497 - type: recall_at_3 value: 27.639000000000003 - type: recall_at_5 value: 33.595000000000006 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 29.067 - type: map_at_10 value: 39.457 - type: map_at_100 value: 40.83 - type: map_at_1000 value: 40.94 - type: map_at_3 value: 35.995 - type: map_at_5 value: 38.159 - type: mrr_at_1 value: 34.937000000000005 - type: mrr_at_10 value: 44.755 - type: mrr_at_100 value: 45.549 - type: mrr_at_1000 value: 45.589 - type: mrr_at_3 value: 41.947 - type: mrr_at_5 value: 43.733 - type: ndcg_at_1 value: 34.937000000000005 - type: ndcg_at_10 value: 45.573 - type: ndcg_at_100 value: 51.266999999999996 - type: ndcg_at_1000 value: 53.184 - type: ndcg_at_3 value: 39.961999999999996 - type: ndcg_at_5 value: 43.02 - type: precision_at_1 value: 34.937000000000005 - type: precision_at_10 value: 8.296000000000001 - type: precision_at_100 value: 1.32 - type: precision_at_1000 value: 0.167 - type: precision_at_3 value: 18.8 - type: precision_at_5 value: 13.763 - type: recall_at_1 value: 29.067 - type: recall_at_10 value: 58.298 - type: recall_at_100 value: 82.25099999999999 - type: recall_at_1000 value: 94.476 - type: recall_at_3 value: 42.984 - type: recall_at_5 value: 50.658 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 25.985999999999997 - type: map_at_10 value: 35.746 - type: map_at_100 value: 37.067 - type: map_at_1000 value: 37.191 - type: map_at_3 value: 32.599000000000004 - type: map_at_5 value: 34.239000000000004 - type: mrr_at_1 value: 31.735000000000003 - type: mrr_at_10 value: 40.515 - type: mrr_at_100 value: 41.459 - type: mrr_at_1000 value: 41.516 - type: mrr_at_3 value: 37.938 - type: mrr_at_5 value: 39.25 - type: ndcg_at_1 value: 31.735000000000003 - type: ndcg_at_10 value: 41.484 - type: ndcg_at_100 value: 47.047 - type: ndcg_at_1000 value: 49.427 - type: ndcg_at_3 value: 36.254999999999995 - type: ndcg_at_5 value: 38.375 - type: precision_at_1 value: 31.735000000000003 - type: precision_at_10 value: 7.66 - type: precision_at_100 value: 1.234 - type: precision_at_1000 value: 0.16 - type: precision_at_3 value: 17.427999999999997 - type: precision_at_5 value: 12.328999999999999 - type: recall_at_1 value: 25.985999999999997 - type: recall_at_10 value: 53.761 - type: recall_at_100 value: 77.149 - type: recall_at_1000 value: 93.342 - type: recall_at_3 value: 39.068000000000005 - type: recall_at_5 value: 44.693 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 24.949749999999998 - type: map_at_10 value: 34.04991666666667 - type: map_at_100 value: 35.26825 - type: map_at_1000 value: 35.38316666666667 - type: map_at_3 value: 31.181333333333335 - type: map_at_5 value: 32.77391666666667 - type: mrr_at_1 value: 29.402833333333334 - type: mrr_at_10 value: 38.01633333333333 - type: mrr_at_100 value: 38.88033333333334 - type: mrr_at_1000 value: 38.938500000000005 - type: mrr_at_3 value: 35.5175 - type: mrr_at_5 value: 36.93808333333333 - type: ndcg_at_1 value: 29.402833333333334 - type: ndcg_at_10 value: 39.403166666666664 - type: 
ndcg_at_100 value: 44.66408333333333 - type: ndcg_at_1000 value: 46.96283333333333 - type: ndcg_at_3 value: 34.46633333333334 - type: ndcg_at_5 value: 36.78441666666667 - type: precision_at_1 value: 29.402833333333334 - type: precision_at_10 value: 6.965833333333333 - type: precision_at_100 value: 1.1330833333333334 - type: precision_at_1000 value: 0.15158333333333335 - type: precision_at_3 value: 15.886666666666665 - type: precision_at_5 value: 11.360416666666667 - type: recall_at_1 value: 24.949749999999998 - type: recall_at_10 value: 51.29325 - type: recall_at_100 value: 74.3695 - type: recall_at_1000 value: 90.31299999999999 - type: recall_at_3 value: 37.580083333333334 - type: recall_at_5 value: 43.529666666666664 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 22.081999999999997 - type: map_at_10 value: 29.215999999999998 - type: map_at_100 value: 30.163 - type: map_at_1000 value: 30.269000000000002 - type: map_at_3 value: 26.942 - type: map_at_5 value: 28.236 - type: mrr_at_1 value: 24.847 - type: mrr_at_10 value: 31.918999999999997 - type: mrr_at_100 value: 32.817 - type: mrr_at_1000 value: 32.897 - type: mrr_at_3 value: 29.831000000000003 - type: mrr_at_5 value: 31.019999999999996 - type: ndcg_at_1 value: 24.847 - type: ndcg_at_10 value: 33.4 - type: ndcg_at_100 value: 38.354 - type: ndcg_at_1000 value: 41.045 - type: ndcg_at_3 value: 29.236 - type: ndcg_at_5 value: 31.258000000000003 - type: precision_at_1 value: 24.847 - type: precision_at_10 value: 5.353 - type: precision_at_100 value: 0.853 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 12.679000000000002 - type: precision_at_5 value: 8.988 - type: recall_at_1 value: 22.081999999999997 - type: recall_at_10 value: 43.505 - type: recall_at_100 value: 66.45400000000001 - type: recall_at_1000 value: 86.378 - type: recall_at_3 value: 32.163000000000004 - type: recall_at_5 value: 37.059999999999995 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 15.540000000000001 - type: map_at_10 value: 22.362000000000002 - type: map_at_100 value: 23.435 - type: map_at_1000 value: 23.564 - type: map_at_3 value: 20.143 - type: map_at_5 value: 21.324 - type: mrr_at_1 value: 18.892 - type: mrr_at_10 value: 25.942999999999998 - type: mrr_at_100 value: 26.883000000000003 - type: mrr_at_1000 value: 26.968999999999998 - type: mrr_at_3 value: 23.727 - type: mrr_at_5 value: 24.923000000000002 - type: ndcg_at_1 value: 18.892 - type: ndcg_at_10 value: 26.811 - type: ndcg_at_100 value: 32.066 - type: ndcg_at_1000 value: 35.166 - type: ndcg_at_3 value: 22.706 - type: ndcg_at_5 value: 24.508 - type: precision_at_1 value: 18.892 - type: precision_at_10 value: 4.942 - type: precision_at_100 value: 0.878 - type: precision_at_1000 value: 0.131 - type: precision_at_3 value: 10.748000000000001 - type: precision_at_5 value: 7.784000000000001 - type: recall_at_1 value: 15.540000000000001 - type: recall_at_10 value: 36.742999999999995 - type: recall_at_100 value: 60.525 - type: recall_at_1000 value: 82.57600000000001 - type: recall_at_3 value: 25.252000000000002 - type: recall_at_5 value: 29.872 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: 
2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 24.453 - type: map_at_10 value: 33.363 - type: map_at_100 value: 34.579 - type: map_at_1000 value: 34.686 - type: map_at_3 value: 30.583 - type: map_at_5 value: 32.118 - type: mrr_at_1 value: 28.918 - type: mrr_at_10 value: 37.675 - type: mrr_at_100 value: 38.567 - type: mrr_at_1000 value: 38.632 - type: mrr_at_3 value: 35.260999999999996 - type: mrr_at_5 value: 36.576 - type: ndcg_at_1 value: 28.918 - type: ndcg_at_10 value: 38.736 - type: ndcg_at_100 value: 44.261 - type: ndcg_at_1000 value: 46.72 - type: ndcg_at_3 value: 33.81 - type: ndcg_at_5 value: 36.009 - type: precision_at_1 value: 28.918 - type: precision_at_10 value: 6.586 - type: precision_at_100 value: 1.047 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 15.360999999999999 - type: precision_at_5 value: 10.857999999999999 - type: recall_at_1 value: 24.453 - type: recall_at_10 value: 50.885999999999996 - type: recall_at_100 value: 75.03 - type: recall_at_1000 value: 92.123 - type: recall_at_3 value: 37.138 - type: recall_at_5 value: 42.864999999999995 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 24.57 - type: map_at_10 value: 33.672000000000004 - type: map_at_100 value: 35.244 - type: map_at_1000 value: 35.467 - type: map_at_3 value: 30.712 - type: map_at_5 value: 32.383 - type: mrr_at_1 value: 29.644 - type: mrr_at_10 value: 38.344 - type: mrr_at_100 value: 39.219 - type: mrr_at_1000 value: 39.282000000000004 - type: mrr_at_3 value: 35.771 - type: mrr_at_5 value: 37.273 - type: ndcg_at_1 value: 29.644 - type: ndcg_at_10 value: 39.567 - type: ndcg_at_100 value: 45.097 - type: ndcg_at_1000 value: 47.923 - type: ndcg_at_3 value: 34.768 - type: ndcg_at_5 value: 37.122 - type: precision_at_1 value: 29.644 - type: precision_at_10 value: 7.5889999999999995 - type: precision_at_100 value: 1.478 - type: precision_at_1000 value: 0.23500000000000001 - type: precision_at_3 value: 16.337 - type: precision_at_5 value: 12.055 - type: recall_at_1 value: 24.57 - type: recall_at_10 value: 51.00900000000001 - type: recall_at_100 value: 75.423 - type: recall_at_1000 value: 93.671 - type: recall_at_3 value: 36.925999999999995 - type: recall_at_5 value: 43.245 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: 2b9f5791698b5be7bc5e10535c8690f20043c3db metrics: - type: map_at_1 value: 21.356 - type: map_at_10 value: 27.904 - type: map_at_100 value: 28.938000000000002 - type: map_at_1000 value: 29.036 - type: map_at_3 value: 25.726 - type: map_at_5 value: 26.935 - type: mrr_at_1 value: 22.551 - type: mrr_at_10 value: 29.259 - type: mrr_at_100 value: 30.272 - type: mrr_at_1000 value: 30.348000000000003 - type: mrr_at_3 value: 27.295 - type: mrr_at_5 value: 28.358 - type: ndcg_at_1 value: 22.551 - type: ndcg_at_10 value: 31.817 - type: ndcg_at_100 value: 37.164 - type: ndcg_at_1000 value: 39.82 - type: ndcg_at_3 value: 27.595999999999997 - type: ndcg_at_5 value: 29.568 - type: precision_at_1 value: 22.551 - type: precision_at_10 value: 4.917 - type: precision_at_100 value: 0.828 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 11.583 - type: precision_at_5 value: 8.133 - type: recall_at_1 value: 21.356 - type: recall_at_10 value: 42.489 - type: recall_at_100 value: 
67.128 - type: recall_at_1000 value: 87.441 - type: recall_at_3 value: 31.165 - type: recall_at_5 value: 35.853 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: 392b78eb68c07badcd7c2cd8f39af108375dfcce metrics: - type: map_at_1 value: 12.306000000000001 - type: map_at_10 value: 21.523 - type: map_at_100 value: 23.358 - type: map_at_1000 value: 23.541 - type: map_at_3 value: 17.809 - type: map_at_5 value: 19.631 - type: mrr_at_1 value: 27.948 - type: mrr_at_10 value: 40.355000000000004 - type: mrr_at_100 value: 41.166000000000004 - type: mrr_at_1000 value: 41.203 - type: mrr_at_3 value: 36.819 - type: mrr_at_5 value: 38.958999999999996 - type: ndcg_at_1 value: 27.948 - type: ndcg_at_10 value: 30.462 - type: ndcg_at_100 value: 37.473 - type: ndcg_at_1000 value: 40.717999999999996 - type: ndcg_at_3 value: 24.646 - type: ndcg_at_5 value: 26.642 - type: precision_at_1 value: 27.948 - type: precision_at_10 value: 9.648 - type: precision_at_100 value: 1.7239999999999998 - type: precision_at_1000 value: 0.232 - type: precision_at_3 value: 18.48 - type: precision_at_5 value: 14.293 - type: recall_at_1 value: 12.306000000000001 - type: recall_at_10 value: 37.181 - type: recall_at_100 value: 61.148 - type: recall_at_1000 value: 79.401 - type: recall_at_3 value: 22.883 - type: recall_at_5 value: 28.59 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: f097057d03ed98220bc7309ddb10b71a54d667d6 metrics: - type: map_at_1 value: 9.357 - type: map_at_10 value: 18.849 - type: map_at_100 value: 25.369000000000003 - type: map_at_1000 value: 26.950000000000003 - type: map_at_3 value: 13.625000000000002 - type: map_at_5 value: 15.956999999999999 - type: mrr_at_1 value: 67.75 - type: mrr_at_10 value: 74.734 - type: mrr_at_100 value: 75.1 - type: mrr_at_1000 value: 75.10900000000001 - type: mrr_at_3 value: 73.542 - type: mrr_at_5 value: 74.167 - type: ndcg_at_1 value: 55.375 - type: ndcg_at_10 value: 39.873999999999995 - type: ndcg_at_100 value: 43.098 - type: ndcg_at_1000 value: 50.69200000000001 - type: ndcg_at_3 value: 44.856 - type: ndcg_at_5 value: 42.138999999999996 - type: precision_at_1 value: 67.75 - type: precision_at_10 value: 31.1 - type: precision_at_100 value: 9.303 - type: precision_at_1000 value: 2.0060000000000002 - type: precision_at_3 value: 48.25 - type: precision_at_5 value: 40.949999999999996 - type: recall_at_1 value: 9.357 - type: recall_at_10 value: 23.832 - type: recall_at_100 value: 47.906 - type: recall_at_1000 value: 71.309 - type: recall_at_3 value: 14.512 - type: recall_at_5 value: 18.3 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 829147f8f75a25f005913200eb5ed41fae320aa1 metrics: - type: accuracy value: 49.655 - type: f1 value: 45.51976190938951 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: 1429cf27e393599b8b359b9b72c666f96b2525f9 metrics: - type: map_at_1 value: 62.739999999999995 - type: map_at_10 value: 73.07000000000001 - type: map_at_100 value: 73.398 - type: map_at_1000 value: 73.41 - type: map_at_3 value: 71.33800000000001 - type: map_at_5 value: 72.423 - type: mrr_at_1 value: 67.777 - type: mrr_at_10 value: 77.873 - type: mrr_at_100 value: 78.091 - type: mrr_at_1000 value: 78.094 - type: mrr_at_3 value: 76.375 - type: mrr_at_5 value: 77.316 - type: ndcg_at_1 value: 67.777 - type: ndcg_at_10 value: 78.24 - type: 
ndcg_at_100 value: 79.557 - type: ndcg_at_1000 value: 79.814 - type: ndcg_at_3 value: 75.125 - type: ndcg_at_5 value: 76.834 - type: precision_at_1 value: 67.777 - type: precision_at_10 value: 9.832 - type: precision_at_100 value: 1.061 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 29.433 - type: precision_at_5 value: 18.665000000000003 - type: recall_at_1 value: 62.739999999999995 - type: recall_at_10 value: 89.505 - type: recall_at_100 value: 95.102 - type: recall_at_1000 value: 96.825 - type: recall_at_3 value: 81.028 - type: recall_at_5 value: 85.28099999999999 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test revision: 41b686a7f28c59bcaaa5791efd47c67c8ebe28be metrics: - type: map_at_1 value: 18.467 - type: map_at_10 value: 30.020999999999997 - type: map_at_100 value: 31.739 - type: map_at_1000 value: 31.934 - type: map_at_3 value: 26.003 - type: map_at_5 value: 28.338 - type: mrr_at_1 value: 35.339999999999996 - type: mrr_at_10 value: 44.108999999999995 - type: mrr_at_100 value: 44.993 - type: mrr_at_1000 value: 45.042 - type: mrr_at_3 value: 41.667 - type: mrr_at_5 value: 43.14 - type: ndcg_at_1 value: 35.339999999999996 - type: ndcg_at_10 value: 37.202 - type: ndcg_at_100 value: 43.852999999999994 - type: ndcg_at_1000 value: 47.235 - type: ndcg_at_3 value: 33.5 - type: ndcg_at_5 value: 34.985 - type: precision_at_1 value: 35.339999999999996 - type: precision_at_10 value: 10.247 - type: precision_at_100 value: 1.7149999999999999 - type: precision_at_1000 value: 0.232 - type: precision_at_3 value: 22.222 - type: precision_at_5 value: 16.573999999999998 - type: recall_at_1 value: 18.467 - type: recall_at_10 value: 44.080999999999996 - type: recall_at_100 value: 68.72200000000001 - type: recall_at_1000 value: 89.087 - type: recall_at_3 value: 30.567 - type: recall_at_5 value: 36.982 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: 766870b35a1b9ca65e67a0d1913899973551fc6c metrics: - type: map_at_1 value: 35.726 - type: map_at_10 value: 50.207 - type: map_at_100 value: 51.05499999999999 - type: map_at_1000 value: 51.12799999999999 - type: map_at_3 value: 47.576 - type: map_at_5 value: 49.172 - type: mrr_at_1 value: 71.452 - type: mrr_at_10 value: 77.41900000000001 - type: mrr_at_100 value: 77.711 - type: mrr_at_1000 value: 77.723 - type: mrr_at_3 value: 76.39399999999999 - type: mrr_at_5 value: 77.00099999999999 - type: ndcg_at_1 value: 71.452 - type: ndcg_at_10 value: 59.260999999999996 - type: ndcg_at_100 value: 62.424 - type: ndcg_at_1000 value: 63.951 - type: ndcg_at_3 value: 55.327000000000005 - type: ndcg_at_5 value: 57.416999999999994 - type: precision_at_1 value: 71.452 - type: precision_at_10 value: 12.061 - type: precision_at_100 value: 1.455 - type: precision_at_1000 value: 0.166 - type: precision_at_3 value: 34.36 - type: precision_at_5 value: 22.266 - type: recall_at_1 value: 35.726 - type: recall_at_10 value: 60.304 - type: recall_at_100 value: 72.75500000000001 - type: recall_at_1000 value: 82.978 - type: recall_at_3 value: 51.54 - type: recall_at_5 value: 55.665 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 8d743909f834c38949e8323a8a6ce8721ea6c7f4 metrics: - type: accuracy value: 66.63759999999999 - type: ap value: 61.48938261286748 - type: f1 value: 66.35089269264965 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: validation revision: 
e6838a846e2408f22cf5cc337ebc83e0bcf77849 metrics: - type: map_at_1 value: 20.842 - type: map_at_10 value: 32.992 - type: map_at_100 value: 34.236 - type: map_at_1000 value: 34.286 - type: map_at_3 value: 29.049000000000003 - type: map_at_5 value: 31.391999999999996 - type: mrr_at_1 value: 21.375 - type: mrr_at_10 value: 33.581 - type: mrr_at_100 value: 34.760000000000005 - type: mrr_at_1000 value: 34.803 - type: mrr_at_3 value: 29.704000000000004 - type: mrr_at_5 value: 32.015 - type: ndcg_at_1 value: 21.375 - type: ndcg_at_10 value: 39.905 - type: ndcg_at_100 value: 45.843 - type: ndcg_at_1000 value: 47.083999999999996 - type: ndcg_at_3 value: 31.918999999999997 - type: ndcg_at_5 value: 36.107 - type: precision_at_1 value: 21.375 - type: precision_at_10 value: 6.393 - type: precision_at_100 value: 0.935 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 13.663 - type: precision_at_5 value: 10.324 - type: recall_at_1 value: 20.842 - type: recall_at_10 value: 61.17 - type: recall_at_100 value: 88.518 - type: recall_at_1000 value: 97.993 - type: recall_at_3 value: 39.571 - type: recall_at_5 value: 49.653999999999996 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: a7e2a951126a26fc8c6a69f835f33a346ba259e3 metrics: - type: accuracy value: 93.46557227542178 - type: f1 value: 92.87345917772146 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: 6299947a7777084cc2d4b64235bf7190381ce755 metrics: - type: accuracy value: 72.42134062927497 - type: f1 value: 55.03624810959269 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 072a486a144adf7f4479a4a0dddb2152e161e1ea metrics: - type: accuracy value: 70.3866845998655 - type: f1 value: 68.9674519872921 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.27774041694687 - type: f1 value: 76.72936190462792 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: dcefc037ef84348e49b0d29109e891c01067226b metrics: - type: v_measure value: 31.511745925773337 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 3cd0e71dfbe09d4de0f9e5ecba43e7ce280959dc metrics: - type: v_measure value: 28.764235987575365 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.29353136386601 - type: mrr value: 33.536774455851685 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: 7eb63cc0c1eb59324d709ebed25fcab851fa7610 metrics: - type: map_at_1 value: 5.702 - type: map_at_10 value: 13.642000000000001 - type: map_at_100 value: 17.503 - type: map_at_1000 value: 19.126 - type: map_at_3 value: 9.748 - type: map_at_5 value: 11.642 - type: mrr_at_1 value: 45.82 - type: mrr_at_10 value: 54.821 - type: mrr_at_100 value: 55.422000000000004 - type: mrr_at_1000 value: 55.452999999999996 - type: mrr_at_3 value: 52.373999999999995 - type: mrr_at_5 value: 53.937000000000005 - type: 
ndcg_at_1 value: 44.272 - type: ndcg_at_10 value: 36.213 - type: ndcg_at_100 value: 33.829 - type: ndcg_at_1000 value: 42.557 - type: ndcg_at_3 value: 40.814 - type: ndcg_at_5 value: 39.562000000000005 - type: precision_at_1 value: 45.511 - type: precision_at_10 value: 27.214 - type: precision_at_100 value: 8.941 - type: precision_at_1000 value: 2.1870000000000003 - type: precision_at_3 value: 37.874 - type: precision_at_5 value: 34.489 - type: recall_at_1 value: 5.702 - type: recall_at_10 value: 17.638 - type: recall_at_100 value: 34.419 - type: recall_at_1000 value: 66.41 - type: recall_at_3 value: 10.914 - type: recall_at_5 value: 14.032 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: 6062aefc120bfe8ece5897809fb2e53bfe0d128c metrics: - type: map_at_1 value: 30.567 - type: map_at_10 value: 45.01 - type: map_at_100 value: 46.091 - type: map_at_1000 value: 46.126 - type: map_at_3 value: 40.897 - type: map_at_5 value: 43.301 - type: mrr_at_1 value: 34.56 - type: mrr_at_10 value: 47.725 - type: mrr_at_100 value: 48.548 - type: mrr_at_1000 value: 48.571999999999996 - type: mrr_at_3 value: 44.361 - type: mrr_at_5 value: 46.351 - type: ndcg_at_1 value: 34.531 - type: ndcg_at_10 value: 52.410000000000004 - type: ndcg_at_100 value: 56.999 - type: ndcg_at_1000 value: 57.830999999999996 - type: ndcg_at_3 value: 44.734 - type: ndcg_at_5 value: 48.701 - type: precision_at_1 value: 34.531 - type: precision_at_10 value: 8.612 - type: precision_at_100 value: 1.118 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 20.307 - type: precision_at_5 value: 14.519000000000002 - type: recall_at_1 value: 30.567 - type: recall_at_10 value: 72.238 - type: recall_at_100 value: 92.154 - type: recall_at_1000 value: 98.375 - type: recall_at_3 value: 52.437999999999995 - type: recall_at_5 value: 61.516999999999996 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: 6205996560df11e3a3da9ab4f926788fc30a7db4 metrics: - type: map_at_1 value: 65.98 - type: map_at_10 value: 80.05600000000001 - type: map_at_100 value: 80.76299999999999 - type: map_at_1000 value: 80.786 - type: map_at_3 value: 76.848 - type: map_at_5 value: 78.854 - type: mrr_at_1 value: 75.86 - type: mrr_at_10 value: 83.397 - type: mrr_at_100 value: 83.555 - type: mrr_at_1000 value: 83.557 - type: mrr_at_3 value: 82.033 - type: mrr_at_5 value: 82.97 - type: ndcg_at_1 value: 75.88000000000001 - type: ndcg_at_10 value: 84.58099999999999 - type: ndcg_at_100 value: 86.151 - type: ndcg_at_1000 value: 86.315 - type: ndcg_at_3 value: 80.902 - type: ndcg_at_5 value: 82.953 - type: precision_at_1 value: 75.88000000000001 - type: precision_at_10 value: 12.986 - type: precision_at_100 value: 1.5110000000000001 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 35.382999999999996 - type: precision_at_5 value: 23.555999999999997 - type: recall_at_1 value: 65.98 - type: recall_at_10 value: 93.716 - type: recall_at_100 value: 99.21799999999999 - type: recall_at_1000 value: 99.97 - type: recall_at_3 value: 83.551 - type: recall_at_5 value: 88.998 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: b2805658ae38990172679479369a78b86de8c390 metrics: - type: v_measure value: 40.45148482612238 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 
metrics: - type: v_measure value: 55.749490673039126 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: 5c59ef3e437a0a9651c8fe6fde943e7dce59fba5 metrics: - type: map_at_1 value: 4.903 - type: map_at_10 value: 11.926 - type: map_at_100 value: 13.916999999999998 - type: map_at_1000 value: 14.215 - type: map_at_3 value: 8.799999999999999 - type: map_at_5 value: 10.360999999999999 - type: mrr_at_1 value: 24.099999999999998 - type: mrr_at_10 value: 34.482 - type: mrr_at_100 value: 35.565999999999995 - type: mrr_at_1000 value: 35.619 - type: mrr_at_3 value: 31.433 - type: mrr_at_5 value: 33.243 - type: ndcg_at_1 value: 24.099999999999998 - type: ndcg_at_10 value: 19.872999999999998 - type: ndcg_at_100 value: 27.606 - type: ndcg_at_1000 value: 32.811 - type: ndcg_at_3 value: 19.497999999999998 - type: ndcg_at_5 value: 16.813 - type: precision_at_1 value: 24.099999999999998 - type: precision_at_10 value: 10.08 - type: precision_at_100 value: 2.122 - type: precision_at_1000 value: 0.337 - type: precision_at_3 value: 18.2 - type: precision_at_5 value: 14.62 - type: recall_at_1 value: 4.903 - type: recall_at_10 value: 20.438000000000002 - type: recall_at_100 value: 43.043 - type: recall_at_1000 value: 68.41000000000001 - type: recall_at_3 value: 11.068 - type: recall_at_5 value: 14.818000000000001 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: cos_sim_pearson value: 78.58086597995997 - type: cos_sim_spearman value: 69.63214182814991 - type: euclidean_pearson value: 72.76175489042691 - type: euclidean_spearman value: 67.84965161872971 - type: manhattan_pearson value: 72.73812689782592 - type: manhattan_spearman value: 67.83610439531277 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: fdf84275bb8ce4b49c971d02e84dd1abc677a50f metrics: - type: cos_sim_pearson value: 75.13970861325006 - type: cos_sim_spearman value: 67.5020551515597 - type: euclidean_pearson value: 66.33415412418276 - type: euclidean_spearman value: 66.82145056673268 - type: manhattan_pearson value: 66.55489484006415 - type: manhattan_spearman value: 66.95147433279057 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 1591bfcbe8c69d4bf7fe2a16e2451017832cafb9 metrics: - type: cos_sim_pearson value: 78.85850536483447 - type: cos_sim_spearman value: 79.1633350177206 - type: euclidean_pearson value: 72.74090561408477 - type: euclidean_spearman value: 73.57374448302961 - type: manhattan_pearson value: 72.92980654233226 - type: manhattan_spearman value: 73.72777155112588 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: e2125984e7df8b7871f6ae9949cf6b6795e7c54b metrics: - type: cos_sim_pearson value: 79.51125593897028 - type: cos_sim_spearman value: 74.46048326701329 - type: euclidean_pearson value: 70.87726087052985 - type: euclidean_spearman value: 67.7721470654411 - type: manhattan_pearson value: 71.05892792135637 - type: manhattan_spearman value: 67.93472619779037 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: 1cd7298cac12a96a373b6a2f18738bb3e739a9b6 metrics: - type: cos_sim_pearson value: 83.8299348880489 - type: cos_sim_spearman value: 84.47194637929275 - type: euclidean_pearson value: 78.68768462480418 - type: euclidean_spearman value: 79.80526323901917 - type: 
manhattan_pearson value: 78.6810718151946 - type: manhattan_spearman value: 79.7820584821254 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 360a0b2dff98700d09e634a01e1cc1624d3e42cd metrics: - type: cos_sim_pearson value: 79.99206664843005 - type: cos_sim_spearman value: 80.96089203722137 - type: euclidean_pearson value: 71.31216213716365 - type: euclidean_spearman value: 71.45258140049407 - type: manhattan_pearson value: 71.26140340402836 - type: manhattan_spearman value: 71.3896894666943 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: 9fc37e8c632af1c87a3d23e685d49552a02582a0 metrics: - type: cos_sim_pearson value: 87.35697089594868 - type: cos_sim_spearman value: 87.78202647220289 - type: euclidean_pearson value: 84.20969668786667 - type: euclidean_spearman value: 83.91876425459982 - type: manhattan_pearson value: 84.24429755612542 - type: manhattan_spearman value: 83.98826315103398 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 2de6ce8c1921b71a755b262c6b57fef195dd7906 metrics: - type: cos_sim_pearson value: 69.06962775868384 - type: cos_sim_spearman value: 69.34889515492327 - type: euclidean_pearson value: 69.28108180412313 - type: euclidean_spearman value: 69.6437114853659 - type: manhattan_pearson value: 69.39974983734993 - type: manhattan_spearman value: 69.69057284482079 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: 8913289635987208e6e7c72789e4be2fe94b6abd metrics: - type: cos_sim_pearson value: 82.42553734213958 - type: cos_sim_spearman value: 81.38977341532744 - type: euclidean_pearson value: 76.47494587945522 - type: euclidean_spearman value: 75.92794860531089 - type: manhattan_pearson value: 76.4768777169467 - type: manhattan_spearman value: 75.9252673228599 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: 56a6d0140cf6356659e2a7c1413286a774468d44 metrics: - type: map value: 80.78825425914722 - type: mrr value: 94.60017197762296 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: a75ae049398addde9b70f6b268875f5cbce99089 metrics: - type: map_at_1 value: 60.633 - type: map_at_10 value: 70.197 - type: map_at_100 value: 70.758 - type: map_at_1000 value: 70.765 - type: map_at_3 value: 67.082 - type: map_at_5 value: 69.209 - type: mrr_at_1 value: 63.333 - type: mrr_at_10 value: 71.17 - type: mrr_at_100 value: 71.626 - type: mrr_at_1000 value: 71.633 - type: mrr_at_3 value: 68.833 - type: mrr_at_5 value: 70.6 - type: ndcg_at_1 value: 63.333 - type: ndcg_at_10 value: 74.697 - type: ndcg_at_100 value: 76.986 - type: ndcg_at_1000 value: 77.225 - type: ndcg_at_3 value: 69.527 - type: ndcg_at_5 value: 72.816 - type: precision_at_1 value: 63.333 - type: precision_at_10 value: 9.9 - type: precision_at_100 value: 1.103 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 26.889000000000003 - type: precision_at_5 value: 18.2 - type: recall_at_1 value: 60.633 - type: recall_at_10 value: 87.36699999999999 - type: recall_at_100 value: 97.333 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 73.656 - type: recall_at_5 value: 82.083 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: 
default split: test revision: 5a8256d0dff9c4bd3be3ba3e67e4e70173f802ea metrics: - type: cos_sim_accuracy value: 99.76633663366337 - type: cos_sim_ap value: 93.84024096781063 - type: cos_sim_f1 value: 88.08080808080808 - type: cos_sim_precision value: 88.9795918367347 - type: cos_sim_recall value: 87.2 - type: dot_accuracy value: 99.46336633663367 - type: dot_ap value: 75.78127156965245 - type: dot_f1 value: 71.41403865717193 - type: dot_precision value: 72.67080745341616 - type: dot_recall value: 70.19999999999999 - type: euclidean_accuracy value: 99.67524752475248 - type: euclidean_ap value: 88.61274955249769 - type: euclidean_f1 value: 82.30852211434735 - type: euclidean_precision value: 89.34426229508196 - type: euclidean_recall value: 76.3 - type: manhattan_accuracy value: 99.67722772277227 - type: manhattan_ap value: 88.77516158012779 - type: manhattan_f1 value: 82.36536430834212 - type: manhattan_precision value: 87.24832214765101 - type: manhattan_recall value: 78.0 - type: max_accuracy value: 99.76633663366337 - type: max_ap value: 93.84024096781063 - type: max_f1 value: 88.08080808080808 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 70a89468f6dccacc6aa2b12a6eac54e74328f235 metrics: - type: v_measure value: 59.20812266121527 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: d88009ab563dd0b16cfaf4436abaf97fa3550cf0 metrics: - type: v_measure value: 33.954248554638056 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: ef807ea29a75ec4f91b50fd4191cb4ee4589a9f9 metrics: - type: map value: 51.52800990025549 - type: mrr value: 52.360394915541974 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: 8753c2788d36c01fc6f05d03fe3f7268d63f9122 metrics: - type: cos_sim_pearson value: 30.737881131277356 - type: cos_sim_spearman value: 31.45979323917254 - type: dot_pearson value: 26.24686017962023 - type: dot_spearman value: 25.006732878791743 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: 2c8041b2c07a79b6f7ba8fe6acc72e5d9f92d217 metrics: - type: map_at_1 value: 0.253 - type: map_at_10 value: 2.1399999999999997 - type: map_at_100 value: 12.873000000000001 - type: map_at_1000 value: 31.002000000000002 - type: map_at_3 value: 0.711 - type: map_at_5 value: 1.125 - type: mrr_at_1 value: 96.0 - type: mrr_at_10 value: 98.0 - type: mrr_at_100 value: 98.0 - type: mrr_at_1000 value: 98.0 - type: mrr_at_3 value: 98.0 - type: mrr_at_5 value: 98.0 - type: ndcg_at_1 value: 94.0 - type: ndcg_at_10 value: 84.881 - type: ndcg_at_100 value: 64.694 - type: ndcg_at_1000 value: 56.85 - type: ndcg_at_3 value: 90.061 - type: ndcg_at_5 value: 87.155 - type: precision_at_1 value: 96.0 - type: precision_at_10 value: 88.8 - type: precision_at_100 value: 65.7 - type: precision_at_1000 value: 25.080000000000002 - type: precision_at_3 value: 92.667 - type: precision_at_5 value: 90.0 - type: recall_at_1 value: 0.253 - type: recall_at_10 value: 2.292 - type: recall_at_100 value: 15.78 - type: recall_at_1000 value: 53.015 - type: recall_at_3 value: 0.7270000000000001 - type: recall_at_5 value: 1.162 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test 
revision: 527b7d77e16e343303e68cb6af11d6e18b9f7b3b metrics: - type: map_at_1 value: 2.116 - type: map_at_10 value: 9.625 - type: map_at_100 value: 15.641 - type: map_at_1000 value: 17.127 - type: map_at_3 value: 4.316 - type: map_at_5 value: 6.208 - type: mrr_at_1 value: 32.653 - type: mrr_at_10 value: 48.083999999999996 - type: mrr_at_100 value: 48.631 - type: mrr_at_1000 value: 48.649 - type: mrr_at_3 value: 42.857 - type: mrr_at_5 value: 46.224 - type: ndcg_at_1 value: 29.592000000000002 - type: ndcg_at_10 value: 25.430999999999997 - type: ndcg_at_100 value: 36.344 - type: ndcg_at_1000 value: 47.676 - type: ndcg_at_3 value: 26.144000000000002 - type: ndcg_at_5 value: 26.304 - type: precision_at_1 value: 32.653 - type: precision_at_10 value: 24.082 - type: precision_at_100 value: 7.714 - type: precision_at_1000 value: 1.5310000000000001 - type: precision_at_3 value: 26.531 - type: precision_at_5 value: 26.939 - type: recall_at_1 value: 2.116 - type: recall_at_10 value: 16.794 - type: recall_at_100 value: 47.452 - type: recall_at_1000 value: 82.312 - type: recall_at_3 value: 5.306 - type: recall_at_5 value: 9.306000000000001 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 67.709 - type: ap value: 13.541535578501716 - type: f1 value: 52.569619919446794 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: 62146448f05be9e52a36b8ee9936447ea787eede metrics: - type: accuracy value: 56.850594227504246 - type: f1 value: 57.233377364910574 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 091a54f9a36281ce7d6590ec8c75dd485e7e01d4 metrics: - type: v_measure value: 39.463722986090474 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.09131549144662 - type: cos_sim_ap value: 66.86677647503386 - type: cos_sim_f1 value: 62.94631710362049 - type: cos_sim_precision value: 59.73933649289099 - type: cos_sim_recall value: 66.51715039577837 - type: dot_accuracy value: 80.27656911247541 - type: dot_ap value: 54.291720398612085 - type: dot_f1 value: 54.77150537634409 - type: dot_precision value: 47.58660957571039 - type: dot_recall value: 64.5118733509235 - type: euclidean_accuracy value: 82.76211480002385 - type: euclidean_ap value: 62.430397690753296 - type: euclidean_f1 value: 59.191590539356774 - type: euclidean_precision value: 56.296119971435374 - type: euclidean_recall value: 62.401055408970976 - type: manhattan_accuracy value: 82.7561542588067 - type: manhattan_ap value: 62.41882051995577 - type: manhattan_f1 value: 59.32101002778785 - type: manhattan_precision value: 54.71361711611321 - type: manhattan_recall value: 64.77572559366754 - type: max_accuracy value: 84.09131549144662 - type: max_ap value: 66.86677647503386 - type: max_f1 value: 62.94631710362049 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.79574649745798 - type: cos_sim_ap 
value: 85.28960532524223 - type: cos_sim_f1 value: 77.98460043358001 - type: cos_sim_precision value: 75.78090948714224 - type: cos_sim_recall value: 80.32029565753002 - type: dot_accuracy value: 85.5939767920208 - type: dot_ap value: 76.14131706694056 - type: dot_f1 value: 72.70246298696868 - type: dot_precision value: 65.27012127894156 - type: dot_recall value: 82.04496458269172 - type: euclidean_accuracy value: 86.72332828812046 - type: euclidean_ap value: 80.84854809178995 - type: euclidean_f1 value: 72.47657499809551 - type: euclidean_precision value: 71.71717171717171 - type: euclidean_recall value: 73.25223283030489 - type: manhattan_accuracy value: 86.7563162184189 - type: manhattan_ap value: 80.87598895575626 - type: manhattan_f1 value: 72.54617892068092 - type: manhattan_precision value: 68.49268225960881 - type: manhattan_recall value: 77.10963966738528 - type: max_accuracy value: 88.79574649745798 - type: max_ap value: 85.28960532524223 - type: max_f1 value: 77.98460043358001 ---

# SGPT-5.8B-weightedmean-msmarco-specb-bitfit

## Usage

For usage instructions, refer to our codebase: https://github.com/Muennighoff/sgpt

## Evaluation Results

For eval results, refer to our paper: https://arxiv.org/abs/2202.08904

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 249592 with parameters:
```
{'batch_size': 2, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```

Parameters of the fit()-Method:
```
{
    "epochs": 10,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 5e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 1000,
    "weight_decay": 0.01
}
```

## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 300, 'do_lower_case': False}) with Transformer model: GPTJModel
  (1): Pooling({'word_embedding_dimension': 4096, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': True, 'pooling_mode_lasttoken': False})
)
```

## Citing & Authors

```bibtex
@article{muennighoff2022sgpt,
  title={SGPT: GPT Sentence Embeddings for Semantic Search},
  author={Muennighoff, Niklas},
  journal={arXiv preprint arXiv:2202.08904},
  year={2022}
}
```
65,461
[ [ -0.020416259765625, -0.0389404296875, 0.0302276611328125, 0.0165252685546875, -0.03399658203125, -0.0261383056640625, -0.0235137939453125, 0.0031528472900390625, 0.0184326171875, 0.0164642333984375, -0.050506591796875, -0.026763916015625, -0.060089111328125, ...
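The architecture printout in the card above enables `pooling_mode_weightedmean_tokens`, i.e. position-weighted mean pooling over GPT-J's 4096-dimensional token states. As a minimal sketch of what that pooling step computes, assuming standard `(batch, seq_len, dim)` tensors and the linear position weighting described in the SGPT paper (the function name is ours, not from the repo):

```python
import torch

def weighted_mean_pool(hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # hidden_states: (batch, seq_len, dim); attention_mask: (batch, seq_len) of 0/1.
    # Token i gets weight i+1, so later tokens count more, a sensible choice for a
    # causal model like GPT-J, where only the last tokens have seen the full input.
    seq_len = hidden_states.size(1)
    positions = torch.arange(1, seq_len + 1, dtype=hidden_states.dtype, device=hidden_states.device)
    weights = positions.unsqueeze(0) * attention_mask.to(hidden_states.dtype)  # zero out padding
    weights = weights.unsqueeze(-1)                                            # (batch, seq_len, 1)
    return (hidden_states * weights).sum(dim=1) / weights.sum(dim=1)
```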
livingbox/model-test-oct-23-v3
2023-10-30T08:24:27.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
livingbox
null
null
livingbox/model-test-oct-23-v3
0
1,948
diffusers
2023-10-30T08:19:08
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### Model-test-oct-23-v3 Dreambooth model trained by livingbox with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)

Sample pictures of this concept:
511
[ [ -0.03106689453125, -0.0738525390625, 0.038604736328125, 0.03594970703125, -0.027801513671875, 0.02886962890625, 0.034912109375, -0.03192138671875, 0.044586181640625, 0.010955810546875, -0.028350830078125, -0.019622802734375, -0.0235443115234375, -0.003238677...
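The DreamBooth card in the record above points to Colab notebooks but includes no inference snippet. A minimal diffusers sketch for trying the checkpoint (the record's tags mark it as a `StableDiffusionPipeline` repo), assuming a CUDA GPU; the prompt is a placeholder, since the card does not state the instance token the concept was trained on:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the DreamBooth checkpoint named in this record and generate one image.
pipe = StableDiffusionPipeline.from_pretrained(
    "livingbox/model-test-oct-23-v3", torch_dtype=torch.float16
).to("cuda")

image = pipe("photo of a modern living room", num_inference_steps=30).images[0]
image.save("sample.png")
```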
hivemind/gpt-j-6B-8bit
2022-10-18T11:49:06.000Z
[ "transformers", "pytorch", "gptj", "text-generation", "causal-lm", "en", "dataset:The Pile", "arxiv:2106.09685", "arxiv:2110.02861", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
text-generation
hivemind
null
null
hivemind/gpt-j-6B-8bit
127
1,947
transformers
2022-03-02T23:29:05
---
language:
- en
tags:
- pytorch
- causal-lm
license: apache-2.0
datasets:
- The Pile
---

Note: this model was superseded by the [`load_in_8bit=True` feature in transformers](https://github.com/huggingface/transformers/pull/17901) by Younes Belkada and Tim Dettmers. Please see [this usage example](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4#scrollTo=W8tQtyjp75O). This legacy model was built for [transformers v4.15.0](https://github.com/huggingface/transformers/releases/tag/v4.15.0) and pytorch 1.11. Newer versions could work, but are not supported.

### Quantized EleutherAI/gpt-j-6b with 8-bit weights

This is a version of EleutherAI's GPT-J with 6 billion parameters that is modified so you can generate **and fine-tune the model in colab or equivalent desktop gpu (e.g. single 1080Ti)**. Here's how to run it: [![colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/drive/1ft6wQU0BhqG5PRlwgaZJv2VukKKjU4Es)

__The [original GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B/tree/main)__ takes 22+ GB memory for float32 parameters alone, and that's before you account for gradients & optimizer. Even if you cast everything to 16-bit, it will still not fit onto most single-GPU setups short of A6000 and A100. You can run inference [on TPU](https://colab.research.google.com/github/kingoflolz/mesh-transformer-jax/blob/master/colab_demo.ipynb) or CPUs, but fine-tuning is way more expensive.

Here, we apply several techniques to make GPT-J usable and fine-tunable on a single GPU with ~11 GB memory:
- large weight tensors are quantized using dynamic 8-bit quantization and de-quantized just-in-time for multiplication
- gradient checkpointing stores only one activation per layer, using dramatically less memory at the cost of ~30% slower training
- scalable fine-tuning with [LoRA](https://arxiv.org/abs/2106.09685) and [8-bit Adam](https://arxiv.org/abs/2110.02861)

In other words, all of the large weight matrices are frozen in 8-bit, and you only train small adapters and optionally 1d tensors (layernorm scales, biases).

![img](https://i.imgur.com/n4XXo1x.png)

__Does 8-bit affect model quality?__ Technically yes, but the effect is negligible in practice. [This notebook measures wikitext test perplexity](https://nbviewer.org/urls/huggingface.co/hivemind/gpt-j-6B-8bit/raw/main/check_perplexity.ipynb) and it is nigh indistinguishable from the original GPT-J. The quantized model is even slightly better, but not to a statistically significant degree.

Our code differs from other 8-bit methods in that we use **8-bit only for storage; all computations are performed in float16 or float32**. As a result, we can take advantage of nonlinear quantization that fits each individual weight distribution. Such nonlinear quantization does not accelerate inference, but it allows for much smaller error.

__What about performance?__ Both checkpointing and de-quantization have some overhead, but it's surprisingly manageable. Depending on GPU and batch size, the quantized model is 1-10% slower than the original model, on top of the gradient-checkpointing overhead (~30%). In short, this is because block-wise quantization from bitsandbytes is really fast on GPU.

### How should I fine-tune the model?

We recommend starting with the original hyperparameters from [the LoRA paper](https://arxiv.org/pdf/2106.09685.pdf). On top of that, there is one more trick to consider: the overhead from de-quantizing weights does not depend on batch size. As a result, the larger a batch size you can fit, the more efficiently you will train.

### Where can I train for free?

You can train fine in Colab, but if you get a K80, it's probably best to switch to other free GPU providers: [kaggle](https://towardsdatascience.com/amazon-sagemaker-studio-lab-a-great-alternative-to-google-colab-7194de6ef69a), [aws sagemaker](https://towardsdatascience.com/amazon-sagemaker-studio-lab-a-great-alternative-to-google-colab-7194de6ef69a) or [paperspace](https://docs.paperspace.com/gradient/more/instance-types/free-instances). For instance, this is the same notebook [running in kaggle](https://www.kaggle.com/justheuristic/dmazur-converted) using a more powerful P100 instance.

### Can I use this technique with other models?

The model was converted using [this notebook](https://nbviewer.org/urls/huggingface.co/hivemind/gpt-j-6B-8bit/raw/main/convert-gpt-j.ipynb). It can be adapted to work with other model types. However, please bear in mind that some models replace Linear and Embedding with custom alternatives that require their own BNBWhateverWithAdapters.
4,813
[ [ -0.051300048828125, -0.0814208984375, 0.036773681640625, -0.002841949462890625, -0.02606201171875, -0.0153961181640625, -0.00528717041015625, -0.040069580078125, -0.0113525390625, 0.0180511474609375, -0.022125244140625, -0.0003669261932373047, -0.03973388671875,...
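To make the storage trick in the card above concrete, here is a toy sketch of block-wise dynamic 8-bit quantization: int8 codes plus one float scale per block, de-quantized back to float just before any matmul. This uses simple linear absmax scaling for brevity; the actual checkpoint uses bitsandbytes' nonlinear (quantile-based) 8-bit codes, so treat it only as an illustration of the storage/compute split, not the repo's implementation.

```python
import torch
import torch.nn.functional as F

def quantize_blockwise(w: torch.Tensor, block_size: int = 4096):
    """Store a weight tensor as int8 codes plus one float32 scale per block."""
    flat = w.float().flatten()
    pad = (-flat.numel()) % block_size           # pad so the tensor splits evenly into blocks
    blocks = F.pad(flat, (0, pad)).view(-1, block_size)
    scales = blocks.abs().max(dim=1, keepdim=True).values.clamp(min=1e-8) / 127.0
    codes = torch.round(blocks / scales).to(torch.int8)
    return codes, scales, w.shape, pad

def dequantize_blockwise(codes, scales, shape, pad):
    """Recover a float tensor just-in-time, e.g. right before a matmul."""
    flat = (codes.float() * scales).flatten()
    return (flat[:-pad] if pad else flat).view(shape)

w = torch.randn(4096, 4096)
codes, scales, shape, pad = quantize_blockwise(w)
w_hat = dequantize_blockwise(codes, scales, shape, pad)
print((w - w_hat).abs().max())  # small per-block rounding error
```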
raynardj/ner-disease-ncbi-bionlp-bc5cdr-pubmed
2021-11-05T07:33:08.000Z
[ "transformers", "pytorch", "roberta", "token-classification", "ner", "ncbi", "disease", "pubmed", "bioinfomatics", "en", "dataset:ncbi-disease", "dataset:bc5cdr", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
raynardj
null
null
raynardj/ner-disease-ncbi-bionlp-bc5cdr-pubmed
7
1,943
transformers
2022-03-02T23:29:05
---
language:
- en
tags:
- ner
- ncbi
- disease
- pubmed
- bioinfomatics
license: apache-2.0
datasets:
- ncbi-disease
- bc5cdr
widget:
- text: "Hepatocyte nuclear factor 4 alpha (HNF4α) is regulated by different promoters to generate two isoforms, one of which functions as a tumor suppressor. Here, the authors reveal that induction of the alternative isoform in hepatocellular carcinoma inhibits the circadian clock by repressing BMAL1, and the reintroduction of BMAL1 prevents HCC tumor growth."
---

# NER to find diseases

> The model was trained on the ncbi-disease and BC5CDR datasets, and pretrained on this [pubmed-pretrained roberta model](/raynardj/roberta-pubmed)

All the labels (the possible token classes):
```json
{"label2id": {
    "O": 0,
    "Disease": 1,
    }
}
```

Note that we removed the 'B-'/'I-' prefixes from the data labels.🗡

## This is the template we suggest for using the model

```python
from transformers import pipeline

PRETRAINED = "raynardj/ner-disease-ncbi-bionlp-bc5cdr-pubmed"
ner = pipeline(task="ner", model=PRETRAINED, tokenizer=PRETRAINED)
ner("Your text", aggregation_strategy="first")
```

And here is a helper to merge the output into consecutive entity spans ⭐️

```python
import pandas as pd
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(PRETRAINED)

def clean_output(outputs):
    results = []
    current = []
    last_idx = 0
    # group tokens whose positions are consecutive
    for output in outputs:
        if output["index"] - 1 == last_idx:
            current.append(output)
        else:
            results.append(current)
            current = [output, ]
        last_idx = output["index"]
    if len(current) > 0:
        results.append(current)

    # merge each token group back into one string span
    strings = []
    for c in results:
        tokens = []
        starts = []
        ends = []
        for o in c:
            tokens.append(o['word'])
            starts.append(o['start'])
            ends.append(o['end'])

        new_str = tokenizer.convert_tokens_to_string(tokens)
        if new_str != '':
            strings.append(dict(
                word=new_str,
                start=min(starts),
                end=max(ends),
                entity=c[0]['entity']
            ))
    return strings

def entity_table(pipeline, **pipeline_kw):
    if "aggregation_strategy" not in pipeline_kw:
        pipeline_kw["aggregation_strategy"] = "first"
    def create_table(text):
        return pd.DataFrame(
            clean_output(
                pipeline(text, **pipeline_kw)
            )
        )
    return create_table

# returns a dataframe of merged entity spans
entity_table(ner)("Your text")
```

> Check our other NER models:
* [gene and gene products](/raynardj/ner-gene-dna-rna-jnlpba-pubmed)
* [chemical substance](/raynardj/ner-chemical-bionlp-bc5cdr-pubmed).
* [disease](/raynardj/ner-disease-ncbi-bionlp-bc5cdr-pubmed)
2,857
[ [ -0.01172637939453125, -0.06365966796875, 0.00948333740234375, 0.007572174072265625, -0.0186004638671875, 0.00197601318359375, 0.002307891845703125, -0.00009614229202270508, 0.04150390625, 0.042938232421875, -0.0243377685546875, -0.0672607421875, -0.0606994628906...
jayavibhav/anime-dreamlike
2023-06-25T15:36:35.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
jayavibhav
null
null
jayavibhav/anime-dreamlike
3
1,941
diffusers
2023-06-25T15:16:40
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- anime diffusion model
103
[ [ -0.01155853271484375, -0.08233642578125, 0.053253173828125, 0.03955078125, -0.0147552490234375, -0.041595458984375, 0.02825927734375, -0.00042366981506347656, 0.0357666015625, 0.040313720703125, -0.0253448486328125, -0.027099609375, -0.046783447265625, -0.03...
Yntec/OpenNijiRemix
2023-10-03T18:17:16.000Z
[ "diffusers", "Anime", "Art", "Open", "OpenNiji", "Stable Diffusion", "Niji", "Nijijourney", "Stylised", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "en", "dataset:Korakoe/NijiJourney-Prompt-Pairs", "dataset:Korakoe/OpenNiji-V2-Dataset", "license:creativeml-openra...
text-to-image
Yntec
null
null
Yntec/OpenNijiRemix
1
1,940
diffusers
2023-08-15T06:12:44
---
thumbnail: https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/4a5maYV74Z-CaWxFfdsGg.png
license: creativeml-openrail-m
datasets:
- Korakoe/NijiJourney-Prompt-Pairs
- Korakoe/OpenNiji-V2-Dataset
language:
- en
tags:
- Anime
- Art
- Open
- OpenNiji
- Stable Diffusion
- Niji
- Nijijourney
- Stylised
- stable-diffusion
- stable-diffusion-diffusers
- diffusers
- text-to-image
pipeline_tag: text-to-image
---

# Open Niji Remix

What happens when you merge the LoRA of OpenNiji with OpenNiji2? You get my favorite OpenNiji model! Now with the MoistMixV2VAE baked in.

Samples and prompts:

![Logo](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/4a5maYV74Z-CaWxFfdsGg.png)

![Result](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/jLgFhB47ydCHKBVwuijx5.png)

white skirt, DETAILED CHIBI EYES, pretty CUTE girl wearing white camisole, fashion shoes, costume, 1940, magazine ad, iconic

A painting of a store with a lot of food, a photorealistic painting by simon stålenhag, featured on cgsociety, photorealism, 2d game art, hyper-realistic, hyper realism

Original pages:

https://huggingface.co/ShoukanLabs/OpenNiji

https://huggingface.co/ShoukanLabs/OpenNiji-V2
1,255
[ [ -0.0247955322265625, -0.044708251953125, 0.017425537109375, 0.017303466796875, -0.021942138671875, -0.005023956298828125, -0.005985260009765625, -0.038970947265625, 0.06787109375, 0.0855712890625, -0.045684814453125, -0.0200042724609375, -0.037994384765625, ...
NeverSleep/Echidna-13b-v0.3
2023-11-01T03:21:25.000Z
[ "transformers", "pytorch", "llama", "text-generation", "license:cc-by-nc-4.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
NeverSleep
null
null
NeverSleep/Echidna-13b-v0.3
12
1,940
transformers
2023-10-27T19:58:28
--- license: cc-by-nc-4.0 --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/630dfb008df86f1e5becadc3/LU1KiXFeJtt7UW_mYhwE0.png) # This model is a collab between [IkariDev](https://huggingface.co/IkariDev) and [Undi](https://huggingface.co/Undi95)! Echidna v0.3 model. Use Alpaca format. Suitable for RP, ERP and general stuff. Echidna v0.3, here it is... it's like Echidna v0.2 on steroids. This model still seems to be pretty sensitive to your generation settings; experiment until you've found yours. [Recommended settings - No settings yet (please suggest some over in the Community tab!)] <!-- description start --> ## Description <!-- [Recommended settings - contributed by localfultonextractor](https://files.catbox.moe/ue0tja.json) --> This repo contains FP16 files of Echidna-13b-v0.3. [FP16 - by IkariDev and Undi](https://huggingface.co/NeverSleep/Echidna-13b-v0.3) <!-- [GGUF - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-GGUF)--> <!-- [GPTQ - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-GPTQ)--> [exl2[8bpw-8h] - by AzureBlack](https://huggingface.co/AzureBlack/Echidna-13b-v0.3-8bpw-8h-exl2) <!-- [AWQ - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-AWQ)--> <!-- [fp16 - by IkariDev+Undi95](https://huggingface.co/IkariDev/Athena-v4)--> [GGUF - by IkariDev and Undi](https://huggingface.co/NeverSleep/Echidna-13b-v0.3-GGUF) <!-- [OLD(GGUF - by IkariDev+Undi95)](https://huggingface.co/IkariDev/Athena-v4-GGUF)--> ## Ratings: Note: We have permission from all users to upload their ratings; I DON'T screenshot random reviews without asking if I can put them here! No ratings yet! If you want your rating to be here, send us a message over on DC and we'll put up a screenshot of it here. DC names are "ikaridev" and "undi". <!-- description end --> <!-- description start --> ## Models+loras used and recipe - Xwin-LM/Xwin-LM-13B-V0.2 - IkariDev/Athena-v3 - Heralax/Cat-0.5 - Undi95/PsyMedRP-v1-13B - cgato/Thespis-13b-v0.4 - KoboldAI/LLaMA2-13B-TiefighterLR - Heralax/Augmental-13b-two-epochs - Sao10K/SthenoWriter2.1-L2-13B - Undi95/Storytelling-v2.1-13B-lora - lemonilia/LimaRP-Llama2-13B-v3-EXPERIMENT <!-- description end --> <!-- prompt-template start --> ## Prompt template: Alpaca ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response: ``` ## Others Undi: If you want to support me, you can [here](https://ko-fi.com/undiai). IkariDev: Visit my [retro/neocities style website](https://ikaridevgit.github.io/) please kek
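For reference, a minimal sketch of running the model with 🤗 transformers and the Alpaca template above; the instruction text and sampling settings are placeholders, not the recommended settings the card is still waiting for:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "NeverSleep/Echidna-13b-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" requires the `accelerate` package; a 13B model in fp16 needs roughly 26 GB of memory.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nWrite the opening scene of a fantasy story.\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
# Print only the newly generated tokens, not the prompt.
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```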
2,613
[ [ -0.07293701171875, -0.045654296875, 0.0360107421875, 0.032958984375, -0.043853759765625, -0.0160369873046875, 0.01177215576171875, -0.068359375, 0.06866455078125, 0.0268096923828125, -0.06671142578125, -0.036285400390625, -0.04583740234375, 0.017593383789062...
Hemanthraju/my-lion
2023-11-03T09:59:38.000Z
[ "diffusers", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Hemanthraju
null
null
Hemanthraju/my-lion
0
1,940
diffusers
2023-11-03T09:55:21
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-lion Dreambooth model trained by Hemanthraju following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: CVR-135 Sample pictures of this concept: ![0](https://huggingface.co/Hemanthraju/my-lion/resolve/main/sample_images/xzg(3).jpg) ![1](https://huggingface.co/Hemanthraju/my-lion/resolve/main/sample_images/xzg(4).jpg) ![2](https://huggingface.co/Hemanthraju/my-lion/resolve/main/sample_images/xzg(1).jpg) ![3](https://huggingface.co/Hemanthraju/my-lion/resolve/main/sample_images/xzg(2).jpg)
674
[ [ -0.06036376953125, -0.01277923583984375, -0.0006103515625, 0.018798828125, -0.01381683349609375, 0.0394287109375, 0.0292205810546875, -0.03277587890625, 0.055999755859375, 0.032379150390625, -0.048675537109375, -0.01824951171875, -0.0266265869140625, -0.0019...
albert-xlarge-v1
2023-08-11T07:30:41.000Z
[ "transformers", "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
null
null
null
albert-xlarge-v1
0
1,939
transformers
2022-03-02T23:29:04
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT XLarge v1 Pretrained model on the English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not make a difference between english and English. Disclaimer: The team releasing ALBERT did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description ALBERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Sentence Ordering Prediction (SOP): ALBERT uses a pretraining loss based on predicting the ordering of two consecutive segments of text. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the ALBERT model as inputs. ALBERT is particular in that it shares its layers across its Transformer. Therefore, all layers have the same weights. Using repeating layers results in a small memory footprint, however, the computational cost remains similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same number of (repeating) layers. This is the first version of the xlarge model. Version 2 is different from version 1 due to different dropout rates, additional training data, and longer training. It has better results in nearly all downstream tasks. This model has the following configuration: - 24 repeating layers - 128 embedding dimension - 2048 hidden dimension - 16 attention heads - 58M parameters ## Intended uses & limitations You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=albert) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation, you should look at models like GPT2.
### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='albert-xlarge-v1') >>> unmasker("Hello I'm a [MASK] model.") [ { "sequence":"[CLS] hello i'm a modeling model.[SEP]", "score":0.05816134437918663, "token":12807, "token_str":"▁modeling" }, { "sequence":"[CLS] hello i'm a modelling model.[SEP]", "score":0.03748830780386925, "token":23089, "token_str":"▁modelling" }, { "sequence":"[CLS] hello i'm a model model.[SEP]", "score":0.033725276589393616, "token":1061, "token_str":"▁model" }, { "sequence":"[CLS] hello i'm a runway model.[SEP]", "score":0.017313428223133087, "token":8014, "token_str":"▁runway" }, { "sequence":"[CLS] hello i'm a lingerie model.[SEP]", "score":0.014405295252799988, "token":29104, "token_str":"▁lingerie" } ] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AlbertTokenizer, AlbertModel tokenizer = AlbertTokenizer.from_pretrained('albert-xlarge-v1') model = AlbertModel.from_pretrained("albert-xlarge-v1") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AlbertTokenizer, TFAlbertModel tokenizer = AlbertTokenizer.from_pretrained('albert-xlarge-v1') model = TFAlbertModel.from_pretrained("albert-xlarge-v1") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='albert-xlarge-v1') >>> unmasker("The man worked as a [MASK].") [ { "sequence":"[CLS] the man worked as a chauffeur.[SEP]", "score":0.029577180743217468, "token":28744, "token_str":"▁chauffeur" }, { "sequence":"[CLS] the man worked as a janitor.[SEP]", "score":0.028865724802017212, "token":29477, "token_str":"▁janitor" }, { "sequence":"[CLS] the man worked as a shoemaker.[SEP]", "score":0.02581118606030941, "token":29024, "token_str":"▁shoemaker" }, { "sequence":"[CLS] the man worked as a blacksmith.[SEP]", "score":0.01849772222340107, "token":21238, "token_str":"▁blacksmith" }, { "sequence":"[CLS] the man worked as a lawyer.[SEP]", "score":0.01820771023631096, "token":3672, "token_str":"▁lawyer" } ] >>> unmasker("The woman worked as a [MASK].") [ { "sequence":"[CLS] the woman worked as a receptionist.[SEP]", "score":0.04604868218302727, "token":25331, "token_str":"▁receptionist" }, { "sequence":"[CLS] the woman worked as a janitor.[SEP]", "score":0.028220869600772858, "token":29477, "token_str":"▁janitor" }, { "sequence":"[CLS] the woman worked as a paramedic.[SEP]", "score":0.0261906236410141, "token":23386, "token_str":"▁paramedic" }, { "sequence":"[CLS] the woman worked as a chauffeur.[SEP]", "score":0.024797942489385605, "token":28744, "token_str":"▁chauffeur" }, { "sequence":"[CLS] the woman worked as a waitress.[SEP]", "score":0.024124596267938614, "token":13678, "token_str":"▁waitress" } ] ``` This bias will also affect all fine-tuned versions of this model. 
## Training data The ALBERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using SentencePiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence A [SEP] Sentence B [SEP] ``` ### Training The ALBERT procedure follows the BERT setup. The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `[MASK]`. - In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace. - In the 10% remaining cases, the masked tokens are left as is. ## Evaluation results When fine-tuned on downstream tasks, the ALBERT models achieve the following results: | | Average | SQuAD1.1 | SQuAD2.0 | MNLI | SST-2 | RACE | |----------------|----------|----------|----------|----------|----------|----------| |V2 | |ALBERT-base |82.3 |90.2/83.2 |82.1/79.3 |84.6 |92.9 |66.8 | |ALBERT-large |85.7 |91.8/85.2 |84.9/81.8 |86.5 |94.9 |75.2 | |ALBERT-xlarge |87.9 |92.9/86.4 |87.9/84.1 |87.9 |95.4 |80.7 | |ALBERT-xxlarge |90.9 |94.6/89.1 |89.8/86.9 |90.6 |96.8 |86.8 | |V1 | |ALBERT-base |80.1 |89.3/82.3 | 80.0/77.1|81.6 |90.3 | 64.0 | |ALBERT-large |82.4 |90.6/83.9 | 82.3/79.4|83.5 |91.7 | 68.5 | |ALBERT-xlarge |85.5 |92.5/86.1 | 86.1/83.1|86.4 |92.4 | 74.8 | |ALBERT-xxlarge |91.0 |94.8/89.3 | 90.2/87.4|90.8 |96.9 | 86.5 | ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-1909-11942, author = {Zhenzhong Lan and Mingda Chen and Sebastian Goodman and Kevin Gimpel and Piyush Sharma and Radu Soricut}, title = {{ALBERT:} {A} Lite {BERT} for Self-supervised Learning of Language Representations}, journal = {CoRR}, volume = {abs/1909.11942}, year = {2019}, url = {http://arxiv.org/abs/1909.11942}, archivePrefix = {arXiv}, eprint = {1909.11942}, timestamp = {Fri, 27 Sep 2019 13:04:21 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1909-11942.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
9,765
[ [ -0.01009368896484375, -0.038665771484375, 0.01910400390625, 0.021575927734375, -0.0303955078125, 0.002925872802734375, 0.01212310791015625, -0.01303863525390625, 0.0271148681640625, 0.04718017578125, -0.04437255859375, -0.034881591796875, -0.0626220703125, 0...
harshupanchal/my-pet-lion
2023-10-07T10:15:10.000Z
[ "diffusers", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
harshupanchal
null
null
harshupanchal/my-pet-lion
0
1,939
diffusers
2023-10-07T10:08:32
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-Pet-lion Dreambooth model trained by harshupanchal following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: SAGE-289 Sample pictures of this concept: ![0](https://huggingface.co/harshupanchal/my-pet-lion/resolve/main/sample_images/xzg_(4).jpeg)
400
[ [ -0.054962158203125, -0.0025157928466796875, 0.005954742431640625, 0.0190887451171875, -0.007297515869140625, 0.0287017822265625, 0.0207672119140625, -0.03485107421875, 0.05419921875, 0.03094482421875, -0.052276611328125, -0.006099700927734375, -0.011039733886718...
timm/coatnet_2_rw_224.sw_in12k_ft_in1k
2023-05-10T23:44:00.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-12k", "arxiv:2201.03545", "license:apache-2.0", "has_space", "region:us" ]
image-classification
timm
null
null
timm/coatnet_2_rw_224.sw_in12k_ft_in1k
0
1,937
timm
2023-01-20T21:25:18
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-12k --- # Model card for coatnet_2_rw_224.sw_in12k_ft_in1k A timm specific CoAtNet image classification model. Pretrained in `timm` on ImageNet-12k (an 11821-class subset of full ImageNet-22k) and fine-tuned on ImageNet-1k by Ross Wightman. ImageNet-12k training performed on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program. Fine-tuning performed on 8x GPU [Lambda Labs](https://lambdalabs.com/) cloud instances. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing an MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention, leaving only ConvNeXt blocks and grid attention w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model with the string `rw` in its name is a `timm`-specific config w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models, so there are variations. All models with the string `tf` exactly match TensorFlow-based models by the original paper authors, with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released.
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 73.9 - GMACs: 15.1 - Activations (M): 49.2 - Image size: 224 x 224 - **Papers:** - CoAtNet: Marrying Convolution and Attention for All Data Sizes: https://arxiv.org/abs/2106.04803 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-12k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('coatnet_2_rw_224.sw_in12k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'coatnet_2_rw_224.sw_in12k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 112, 112]) # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'coatnet_2_rw_224.sw_in12k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| 
|[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| 
|[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| 
|[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
22,396
[ [ -0.0511474609375, -0.032196044921875, 0.002109527587890625, 0.0302886962890625, -0.0230560302734375, -0.015655517578125, -0.01111602783203125, -0.0281829833984375, 0.052825927734375, 0.015899658203125, -0.043121337890625, -0.0455322265625, -0.049530029296875, ...
bluenguyen/longformer-phobert-base-4096
2023-01-11T06:50:59.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "longformer", "long context", "vi", "arxiv:2004.05150", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
bluenguyen
null
null
bluenguyen/longformer-phobert-base-4096
1
1,936
transformers
2022-12-21T01:47:35
--- language: vi widget: - text: "Hoàng_Sa và Trường_Sa là <mask> Việt_Nam ." tags: - roberta - longformer - long context pipeline_tag: fill-mask --- # Longformer Phobert base model with max input length of 4096 **Experiment performed with Transformers version 4.25.1**\ A Longformer roberta model for long context based on [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) and [Longformer](https://arxiv.org/abs/2004.05150).\ The Phobert model is converted to a Longformer version using the [author's repo](https://github.com/allenai/longformer), then MLM pretraining is continued for 5000 steps with batch size 64 on the [Binhvq News Corpus](https://github.com/binhvq/news-corpus) so the model can learn to work with the new sliding window attention.\ This corpus does not contain very long documents in general, so you should fine-tune this model on your own long-document dataset for the downstream task to get better results.\ The final BPC is 1.926 (in my experiment, the original BPC of the Phobert-base model with max input length of 256 is 2.067). ## Usage Fill mask example: ```python from transformers import RobertaForMaskedLM, AutoTokenizer from transformers.models.longformer.modeling_longformer import LongformerSelfAttention class RobertaLongSelfAttention(LongformerSelfAttention): def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value = None, output_attentions=False, ): attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = any(is_index_global_attn.flatten()) return super().forward(hidden_states, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, attention_mask=attention_mask, output_attentions=output_attentions) class RobertaLongForMaskedLM(RobertaForMaskedLM): def __init__(self, config): super().__init__(config) for i, layer in enumerate(self.roberta.encoder.layer): layer.attention.self = RobertaLongSelfAttention(config, layer_id=i) tokenizer = AutoTokenizer.from_pretrained("bluenguyen/longformer-phobert-base-4096") model = RobertaLongForMaskedLM.from_pretrained("bluenguyen/longformer-phobert-base-4096") TXT = ( "Hoàng_Sa và Trường_Sa là <mask> Việt_Nam ." + "Đó là điều không_thể chối_cãi ." * 300 + "Bằng_chứng lịch_sử , pháp_lý về chủ_quyền của Việt_Nam với 2 quần_đảo này đã và đang được nhiều quốc_gia và cộng_đồng quốc_tế <mask> ." ) input_ids = tokenizer([TXT], padding=True, pad_to_multiple_of=256, return_tensors="pt")["input_ids"] logits = model(input_ids).logits masked_index = [i.item() for i in (input_ids[0] == tokenizer.mask_token_id).nonzero()] for index in masked_index: probs = logits[0, index].softmax(dim=0) values, predictions = probs.topk(3) print(tokenizer.batch_decode([[p] for p in predictions])) > ['của', 'lãnh_thổ', 'chủ_quyền'] > ['công_nhận', 'thừa_nhận', 'ghi_nhận'] ``` Because this model is based on [vinai/phobert-base](https://huggingface.co/vinai/phobert-base), users should use [VnCoreNLP](https://github.com/vncorenlp/VnCoreNLP) or the [Python Vietnamese Toolkit](https://github.com/trungtv/pyvi) (pyvi) to segment input raw texts.\ More details about Longformer can be found in the [author's repo](https://github.com/allenai/longformer). ## Contact information For personal questions related to this implementation, please contact via reddotbluename@gmail.com
3,789
[ [ -0.017486572265625, -0.0577392578125, 0.029327392578125, 0.029449462890625, -0.0291290283203125, -0.0219573974609375, -0.0380859375, -0.0258026123046875, 0.00626373291015625, 0.0311431884765625, -0.038818359375, -0.039154052734375, -0.05242919921875, 0.01145...
Gustavosta/MagicPrompt-Dalle
2023-03-17T21:38:43.000Z
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "license:mit", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
Gustavosta
null
null
Gustavosta/MagicPrompt-Dalle
35
1,934
transformers
2022-09-18T03:47:03
--- license: mit --- # MagicPrompt - Dall-E 2 This is a model from the MagicPrompt series of models, which are [GPT-2](https://huggingface.co/gpt2) models intended to generate prompt texts for imaging AIs, in this case: [Dall-E 2](https://openai.com/dall-e-2/). ## 🖼️ Here's an example: <img src="https://files.catbox.moe/h10plz.png"> This model was trained on a set of about 26k prompts filtered and extracted from various places such as: [The Web Archive](https://web.archive.org/web/*/https://labs.openai.com/s/*), [The SubReddit for Dall-E 2](https://www.reddit.com/r/dalle2) and [dalle2.gallery](https://dalle2.gallery/#search). This may be a relatively small dataset, but we have to consider that Dall-E 2 is a closed service and we only have prompts from people who share it and have access to the service, for now. The model was trained for about 40,000 steps and I have plans to improve it if possible. If you want to test the model with a demo, you can go to: "[spaces/Gustavosta/MagicPrompt-Dalle](https://huggingface.co/spaces/Gustavosta/MagicPrompt-Dalle)". ## 💻 You can see other MagicPrompt models: - For Stable Diffusion: [Gustavosta/MagicPrompt-Stable-Diffusion](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion) - For Midjourney: [Gustavosta/MagicPrompt-Midjourney](https://huggingface.co/Gustavosta/MagicPrompt-Midjourney) **[⚠️ In progress]** - MagicPrompt full: [Gustavosta/MagicPrompt](https://huggingface.co/Gustavosta/MagicPrompt) **[⚠️ In progress]** ## ⚖️ Licence: [MIT](https://huggingface.co/models?license=license:mit) When using this model, please credit: [Gustavosta](https://huggingface.co/Gustavosta) **Thanks for reading this far! :)**
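A minimal sketch of generating prompt candidates with the 🤗 transformers text-generation pipeline; the seed phrase and sampling settings are illustrative only:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="Gustavosta/MagicPrompt-Dalle")

# Expand a short seed phrase into several full prompt candidates.
for out in generator("a portrait of", max_new_tokens=40, do_sample=True, num_return_sequences=3):
    print(out["generated_text"])
```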
1,704
[ [ -0.0274810791015625, -0.050048828125, 0.03936767578125, 0.01317596435546875, -0.0161590576171875, -0.027069091796875, 0.0209503173828125, -0.0201416015625, 0.0039825439453125, 0.0362548828125, -0.05975341796875, -0.03369140625, -0.050750732421875, 0.00619506...
artificialguybr/LineAniRedmond-LinearMangaSDXL-V2
2023-10-07T04:14:25.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "license:creativeml-openrail-m", "has_space", "region:us" ]
text-to-image
artificialguybr
null
null
artificialguybr/LineAniRedmond-LinearMangaSDXL-V2
1
1,927
diffusers
2023-10-07T04:12:13
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion - lora - diffusers base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: LineAniAF, lineart widget: - text: LineAniAF, lineart --- # LineAni.Redmond V2 ![row01](00207-2331126365.png) LineAni.Redmond V2 is here! Test all my loras here: https://huggingface.co/spaces/artificialguybr/artificialguybr-demo-lora I'm grateful for the GPU time from Redmond.AI that allowed me to make this LORA! This is a Manga Lineart LORA fine-tuned on SD XL 1.0. The LORA has a high capacity to generate Manga Lineart styles. It's a versatile LORA. You can use detailed, minimalist, colorful, black and white as tags to control the results. The tag for the model: LineAniAF The LORA is not perfect and sometimes needs more than one gen to create good images. That's my first Anime LORA. Please be patient <3 This is inspired by a good LORA for SD 1.5! I really hope you like the LORA and use it. If you like the model and think it's worth it, you can make a donation to my Patreon or Ko-fi. Follow me on twitter to be the first to know about new models: https://twitter.com/artificialguybr/
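A minimal sketch of applying the LORA with 🤗 diffusers on top of the SD XL 1.0 base it was fine-tuned from, assuming the LoRA weights sit at the repo root where `load_lora_weights` can auto-discover them; the prompt after the trigger tag is illustrative:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
# Load the LoRA on top of the SD XL 1.0 base model.
pipe.load_lora_weights("artificialguybr/LineAniRedmond-LinearMangaSDXL-V2")

# "LineAniAF" is the trigger tag given in the card above.
image = pipe("LineAniAF, lineart, a samurai standing in the rain, detailed").images[0]
image.save("lineani_sample.png")
```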
1,170
[ [ -0.04425048828125, -0.05950927734375, 0.0118408203125, 0.0357666015625, -0.05560302734375, -0.013214111328125, 0.021392822265625, -0.050628662109375, 0.0994873046875, 0.040924072265625, -0.05889892578125, -0.02679443359375, -0.0189971923828125, -0.0106353759...
komfysach/groow-new
2023-10-31T08:33:48.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
komfysach
null
null
komfysach/groow-new
1
1,925
diffusers
2023-10-31T08:29:41
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### groow_new Dreambooth model trained by komfysach with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
500
[ [ -0.03607177734375, -0.070068359375, 0.047454833984375, 0.0229339599609375, -0.0267181396484375, 0.02825927734375, 0.025299072265625, -0.0203857421875, 0.050445556640625, 0.0176544189453125, -0.0195770263671875, -0.023101806640625, -0.048858642578125, -0.0234...
timm/fbnetv3_g.ra2_in1k
2023-04-27T22:48:53.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:2006.02049", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/fbnetv3_g.ra2_in1k
0
1,924
timm
2022-12-16T05:37:14
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for fbnetv3_g.ra2_in1k An FBNet-v3 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below. Recipe details: * RandAugment `RA2` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). * RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging * Step (exponential decay w/ staircase) LR schedule with warmup ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 16.6 - GMACs: 1.3 - Activations (M): 14.9 - Image size: train = 240 x 240, test = 288 x 288 - **Papers:** - FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining: https://arxiv.org/abs/2006.02049 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('fbnetv3_g.ra2_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'fbnetv3_g.ra2_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 24, 120, 120]) # torch.Size([1, 40, 60, 60]) # torch.Size([1, 56, 30, 30]) # torch.Size([1, 160, 15, 15]) # torch.Size([1, 1728, 8, 8]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'fbnetv3_g.ra2_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1728, 8, 8) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features)
shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{dai2021fbnetv3, title={Fbnetv3: Joint architecture-recipe search using predictor pretraining}, author={Dai, Xiaoliang and Wan, Alvin and Zhang, Peizhao and Wu, Bichen and He, Zijian and Wei, Zhen and Chen, Kan and Tian, Yuandong and Yu, Matthew and Vajda, Peter and others}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={16276--16285}, year={2021} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
4,858
[ [ -0.032501220703125, -0.03277587890625, 0.0016269683837890625, 0.007720947265625, -0.022613525390625, -0.02679443359375, -0.01427459716796875, -0.032623291015625, 0.019989013671875, 0.03582763671875, -0.035980224609375, -0.048553466796875, -0.053985595703125, ...
CLTL/MedRoBERTa.nl
2022-12-20T15:05:31.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "nl", "doi:10.57967/hf/0960", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
CLTL
null
null
CLTL/MedRoBERTa.nl
3
1,923
transformers
2022-03-02T23:29:04
--- language: nl license: mit --- # MedRoBERTa.nl ## Description This model is a RoBERTa-based model pre-trained from scratch on Dutch hospital notes sourced from Electronic Health Records. The model is not fine-tuned. All code used for the creation of MedRoBERTa.nl can be found at https://github.com/cltl-students/verkijk_stella_rma_thesis_dutch_medical_language_model. ## Intended use The model can be fine-tuned on any type of task. Since it is a domain-specific model trained on medical data, it is meant to be used on medical NLP tasks for Dutch. ## Data The model was trained on nearly 10 million hospital notes from the Amsterdam University Medical Centres. The training data was anonymized before starting the pre-training procedure. ## Privacy By anonymizing the training data we made sure the model did not learn any representative associations linked to names. Apart from the training data, the model's vocabulary was also anonymized. This ensures that the model can not predict any names in the generative fill-mask task. ## Authors Stella Verkijk, Piek Vossen ## Reference Paper: Verkijk, S. & Vossen, P. (2022) MedRoBERTa.nl: A Language Model for Dutch Electronic Health Records. Computational Linguistics in the Netherlands Journal, 11.
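The card gives no usage snippet; since the model is RoBERTa-based and not fine-tuned, a minimal fill-mask sketch with 🤗 transformers (the Dutch example sentence is illustrative, not from the training data) could be:

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="CLTL/MedRoBERTa.nl")

# RoBERTa-style checkpoints use "<mask>" as the mask token.
# "The patient was admitted with severe <mask>."
for pred in fill("De patiënt werd opgenomen met ernstige <mask>."):
    print(f'{pred["token_str"]!r}: {pred["score"]:.3f}')
```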
1,264
[ [ 0.005802154541015625, -0.05224609375, 0.045166015625, 0.0242462158203125, -0.0288238525390625, -0.01503753662109375, -0.0092315673828125, -0.04638671875, 0.049407958984375, 0.05517578125, -0.047576904296875, -0.0611572265625, -0.053192138671875, 0.0199279785...
NlpHUST/vibert4news-base-cased
2023-03-17T02:28:40.000Z
[ "transformers", "pytorch", "safetensors", "fill-mask", "vn", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
NlpHUST
null
null
NlpHUST/vibert4news-base-cased
4
1,921
transformers
2022-03-02T23:29:04
--- language: vn --- # BERT for Vietnamese, trained on a news dataset of more than 20 GB Applied to the sentiment analysis task on [AIViVN's comments dataset](https://www.aivivn.com/contests/6). The model achieved 0.90268 on the public leaderboard (the winner's score is 0.90087). Bert4news is used in the ViNLP Vietnamese toolkit (word segmentation and Named Entity Recognition): https://github.com/bino282/ViNLP We use a word-level SentencePiece vocabulary and basic BERT tokenization, with the same config as BERT base and lowercase = False. You can download the trained model: - [tensorflow](https://drive.google.com/file/d/1X-sRDYf7moS_h61J3L79NkMVGHP-P-k5/view?usp=sharing). - [pytorch](https://drive.google.com/file/d/11aFSTpYIurn-oI2XpAmcCTccB_AonMOu/view?usp=sharing). Use with huggingface/transformers ```python import torch from transformers import BertTokenizer,BertModel tokenizer= BertTokenizer.from_pretrained("NlpHUST/vibert4news-base-cased") bert_model = BertModel.from_pretrained("NlpHUST/vibert4news-base-cased") line = "Tôi là sinh viên trường Bách Khoa Hà Nội ." input_id = tokenizer.encode(line,add_special_tokens = True) att_mask = [int(token_id > 0) for token_id in input_id] input_ids = torch.tensor([input_id]) att_masks = torch.tensor([att_mask]) with torch.no_grad(): features = bert_model(input_ids,att_masks) print(features) ``` # Vietnamese toolkit with bert ViNLP is an annotation system for Vietnamese; it uses the pretrained [Bert4news](https://github.com/bino282/bert4news/) model, fine-tuned for Vietnamese NLP components (word segmentation and Named Entity Recognition), and achieves high accuracy. ### Installation ```bash git clone https://github.com/bino282/ViNLP.git cd ViNLP python setup.py develop build ``` ### Test Segmentation The model achieved an F1 score of 0.984 on the VLSP 2013 dataset |Model | F1 | |--------|-----------| | **BertVnTokenizer** | 98.40 | | **DongDu** | 96.90 | | **JvnSegmenter-Maxent** | 97.00 | | **JvnSegmenter-CRFs** | 97.06 | | **VnTokenizer** | 97.33 | | **UETSegmenter** | 97.87 | | **VnCoreNLP (i.e. RDRsegmenter)** | 97.90 | ```python from ViNLP import BertVnTokenizer tokenizer = BertVnTokenizer() sentences = tokenizer.split(["Tổng thống Donald Trump ký sắc lệnh cấm mọi giao dịch của Mỹ với ByteDance và Tecent - chủ sở hữu của 2 ứng dụng phổ biến TikTok và WeChat sau 45 ngày nữa."]) print(sentences[0]) ``` ``` Tổng_thống Donald_Trump ký sắc_lệnh cấm mọi giao_dịch của Mỹ với ByteDance và Tecent - chủ_sở_hữu của 2 ứng_dụng phổ_biến TikTok và WeChat sau 45 ngày nữa . ``` ### Test Named Entity Recognition The model achieved an F1 score of 0.786 on VLSP 2018 for all named entities, including nested entities |Model | F1 | |--------|-----------| | **BertVnNer** | 78.60 | | **VNER Attentive Neural Network** | 77.52 | | **vietner CRF (ngrams + word shapes + cluster + w2v)** | 76.63 | | **ZA-NER BiLSTM** | 74.70 | ```python from ViNLP import BertVnNer bert_ner_model = BertVnNer() sentence = "Theo SCMP, báo cáo của CSIS với tên gọi Định hình Tương lai Chính sách của Mỹ với Trung Quốc cũng cho thấy sự ủng hộ tương đối rộng rãi của các chuyên gia về việc cấm Huawei, tập đoàn viễn thông khổng lồ của Trung Quốc" entities = bert_ner_model.annotate([sentence]) print(entities) ``` ``` [{'ORGANIZATION': ['SCMP', 'CSIS', 'Huawei'], 'LOCATION': ['Mỹ', 'Trung Quốc']}] ``` Run training with the base config ```bash python train_pytorch.py \ --model_path=bert4news.pytorch \ --max_len=200 \ --batch_size=16 \ --epochs=6 \ --lr=2e-5 ``` ### Contact information For personal communication related to this project, please contact Nha Nguyen Van (nha282@gmail.com).
3,679
[ [ -0.0282745361328125, -0.0555419921875, 0.0237884521484375, 0.03094482421875, -0.034027099609375, -0.01239776611328125, -0.0340576171875, -0.0216217041015625, 0.0172882080078125, 0.026214599609375, -0.026397705078125, -0.050628662109375, -0.043060302734375, 0...
clue/roberta_chinese_base
2021-05-20T15:23:58.000Z
[ "transformers", "pytorch", "jax", "roberta", "zh", "endpoints_compatible", "region:us" ]
null
clue
null
null
clue/roberta_chinese_base
2
1,921
transformers
2022-03-02T23:29:05
--- language: zh --- ## roberta_chinese_base ### Overview **Language model:** roberta-base **Model size:** 392M **Language:** Chinese **Training data:** [CLUECorpusSmall](https://github.com/CLUEbenchmark/CLUECorpus2020) **Eval data:** [CLUE dataset](https://github.com/CLUEbenchmark/CLUE) ### Results For results on downstream tasks like text classification, please refer to [this repository](https://github.com/CLUEbenchmark/CLUE). ### Usage **NOTE:** You have to call **BertTokenizer** instead of RobertaTokenizer !!! ``` import torch from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained("clue/roberta_chinese_base") roberta = BertModel.from_pretrained("clue/roberta_chinese_base") ``` ### About CLUE benchmark Organization of Language Understanding Evaluation benchmark for Chinese: tasks & datasets, baselines, pre-trained Chinese models, corpus and leaderboard. Github: https://github.com/CLUEbenchmark Website: https://www.cluebenchmarks.com/
999
[ [ 0.00548553466796875, -0.026763916015625, 0.0209808349609375, 0.029754638671875, -0.037109375, -0.00518035888671875, -0.04168701171875, -0.0218048095703125, -0.0113677978515625, 0.0104827880859375, -0.046630859375, -0.059722900390625, -0.04827880859375, -0.01...
hfl/chinese-lert-base
2022-11-17T04:13:20.000Z
[ "transformers", "pytorch", "tf", "bert", "fill-mask", "zh", "arxiv:2211.05344", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
hfl
null
null
hfl/chinese-lert-base
11
1,921
transformers
2022-10-26T02:12:14
--- language: - zh license: "apache-2.0" --- ### LERT LERT is a linguistically-motivated pre-trained language model. Further information: https://github.com/ymcui/LERT/blob/main/README_EN.md - **LERT: A Linguistically-motivated Pre-trained Language Model** - *Yiming Cui, Wanxiang Che, Shijin Wang, Ting Liu* - Paper link: https://arxiv.org/abs/2211.05344
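The card links the repo but gives no usage snippet; since LERT is BERT-style, a minimal fill-mask sketch with 🤗 transformers (the example sentence is illustrative) could be:

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="hfl/chinese-lert-base")

# BERT-style checkpoints use "[MASK]" as the mask token.
# "Harbin is the capital of [MASK]longjiang province." (expects "黑")
for pred in fill("哈尔滨是[MASK]龙江省的省会。"):
    print(pred["token_str"], round(pred["score"], 3))
```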
359
[ [ -0.00827789306640625, -0.0693359375, 0.030059814453125, 0.0130615234375, -0.01200103759765625, 0.00968170166015625, -0.02374267578125, -0.006710052490234375, 0.00817108154296875, 0.037384033203125, -0.041046142578125, -0.0235137939453125, -0.03875732421875, ...
microsoft/wavlm-base-plus-sd
2022-03-25T12:06:46.000Z
[ "transformers", "pytorch", "wavlm", "audio-frame-classification", "speech", "en", "arxiv:1912.07875", "arxiv:2106.06909", "arxiv:2101.00390", "arxiv:2110.13900", "endpoints_compatible", "region:us" ]
null
microsoft
null
null
microsoft/wavlm-base-plus-sd
4
1,920
transformers
2022-03-02T23:29:05
--- language: - en tags: - speech --- # WavLM-Base-Plus for Speaker Diarization [Microsoft's WavLM](https://github.com/microsoft/unilm/tree/master/wavlm) The model was pretrained on 16kHz sampled speech audio with utterance and speaker contrastive loss. When using the model, make sure that your speech input is also sampled at 16kHz. The model was pre-trained on: - 60,000 hours of [Libri-Light](https://arxiv.org/abs/1912.07875) - 10,000 hours of [GigaSpeech](https://arxiv.org/abs/2106.06909) - 24,000 hours of [VoxPopuli](https://arxiv.org/abs/2101.00390) [Paper: WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) Authors: Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei **Abstract** *Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.* The original model can be found under https://github.com/microsoft/unilm/tree/master/wavlm. # Fine-tuning details The model is fine-tuned on the [LibriMix dataset](https://github.com/JorisCos/LibriMix) using just a linear layer for mapping the network outputs. # Usage ## Speaker Diarization ```python from transformers import Wav2Vec2FeatureExtractor, WavLMForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('microsoft/wavlm-base-plus-sd') model = WavLMForAudioFrameClassification.from_pretrained('microsoft/wavlm-base-plus-sd') # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt") logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long() ``` # License The official license can be found [here](https://github.com/microsoft/UniSpeech/blob/main/LICENSE) ![design](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/wavlm.png)
3,294
[ [ -0.035919189453125, -0.044952392578125, 0.01433563232421875, 0.014129638671875, -0.01219940185546875, -0.006496429443359375, -0.01438140869140625, -0.04022216796875, -0.006305694580078125, 0.036468505859375, -0.050079345703125, -0.045135498046875, -0.03091430664...
monadical-labs/minecraft-skin-generator
2023-10-23T19:58:38.000Z
[ "diffusers", "minecraft", "text-to-image", "en", "license:openrail", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
monadical-labs
null
null
monadical-labs/minecraft-skin-generator
6
1,920
diffusers
2023-06-24T18:30:50
---
license: openrail
language:
- en
library_name: diffusers
pipeline_tag: text-to-image
tags:
- minecraft
---

## Update

We have released a hosted version of our Minecraft Skin Generator at: [https://www.skingenerator.io](https://www.skingenerator.io)

While closed source, this version improves on the quality of generated skins from the publicly available model, plus it supports transparency in the helmet/hair section of the skins. Please check it out and let us know what you think!

## Overview

This Stable Diffusion model was fine-tuned to generate pre-version-1.8 Minecraft character skins, based on a text prompt. The process for doing so is detailed in the Monadical blog post, [Even More Skin in the Game: Digging Deeper Into Stable Diffusion-Generated Minecraft Skins](https://monadical.com/posts/minecraft-skins-part2.html).

The model was fine-tuned on the dataset for 13,000 steps using the 'train_text_to_image.py' script provided with the diffusers library. A checkpoint has been included in the 'checkpoint' directory.

Some postprocessing is required to import and use the generated skins in Minecraft. Example code to generate usable in-game Minecraft skins can be found in the [Monadical-SAS/minecraft_skin_generator](https://github.com/Monadical-SAS/minecraft_skin_generator) GitHub repository.

## Examples

Here are some example text prompts and the images they generate:

* "Albert Einstein"

![Albert Einstein](examples/einstein.png)

* "Mahatma Ghandi"

![Mahatma Ghandi](examples/ghandi.png)

* "A man in a purple suit wearing a top hat."

![A man in a purple suit wearing a top hat.](examples/purplesuit.png)

## Contact Information

You can contact me at: Cory Spencer \<cory@monadical.com\>

[![Monadical](logo.png)](https://monadical.com/)
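The card stops short of a loading snippet; below is a minimal sketch with `diffusers` (the prompt reuses an example from above, and the in-game postprocessing step described earlier is still required):

```python
from diffusers import StableDiffusionPipeline

# Minimal sketch: load the fine-tuned checkpoint and render a raw skin texture.
pipe = StableDiffusionPipeline.from_pretrained("monadical-labs/minecraft-skin-generator")
pipe = pipe.to("cuda")

image = pipe("Albert Einstein").images[0]
image.save("einstein_skin.png")  # still needs postprocessing before use in Minecraft
```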
1,776
[ [ -0.0269622802734375, -0.0682373046875, 0.042510986328125, 0.018585205078125, -0.01372528076171875, 0.01427459716796875, -0.025054931640625, -0.04254150390625, 0.025787353515625, 0.0689697265625, -0.06695556640625, -0.050140380859375, -0.020111083984375, -0.0...
cross-attention/asymmetric-autoencoder-kl-x-1-5
2023-07-19T17:47:08.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "arxiv:2306.04632", "diffusers:AsymmetricAutoencoderKL", "region:us" ]
null
cross-attention
null
null
cross-attention/asymmetric-autoencoder-kl-x-1-5
2
1,920
diffusers
2023-07-07T14:32:59
---
tags:
- stable-diffusion
- stable-diffusion-diffusers
inference: false
library_name: diffusers
---

# Asymmetric Autoencoder KL

[Designing a Better Asymmetric VQGAN for StableDiffusion](https://arxiv.org/abs/2306.04632)

## Abstract

*StableDiffusion is a revolutionary text-to-image generator that is causing a stir in the world of image generation and editing. Unlike traditional methods that learn a diffusion model in pixel space, StableDiffusion learns a diffusion model in the latent space via a VQGAN, ensuring both efficiency and quality. It not only supports image generation tasks, but also enables image editing for real images, such as image inpainting and local editing. However, we have observed that the vanilla VQGAN used in StableDiffusion leads to significant information loss, causing distortion artifacts even in non-edited image regions. To this end, we propose a new asymmetric VQGAN with two simple designs. Firstly, in addition to the input from the encoder, the decoder contains a conditional branch that incorporates information from task-specific priors, such as the unmasked image region in inpainting. Secondly, the decoder is much heavier than the encoder, allowing for more detailed recovery while only slightly increasing the total inference cost. The training cost of our asymmetric VQGAN is cheap, and we only need to retrain a new asymmetric decoder while keeping the vanilla VQGAN encoder and StableDiffusion unchanged. Our asymmetric VQGAN can be widely used in StableDiffusion-based inpainting and local editing methods. Extensive experiments demonstrate that it can significantly improve the inpainting and editing performance, while maintaining the original text-to-image capability. The code is available at https://github.com/buxiangzhiren/Asymmetric_VQGAN/tree/main*

## Scales

* https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5
* https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-2

## Diffusers

```python
from io import BytesIO
from PIL import Image
import requests
from diffusers import AsymmetricAutoencoderKL, StableDiffusionInpaintPipeline


def download_image(url: str) -> Image.Image:
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")


prompt = "a photo of a person"
img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"

image = download_image(img_url).resize((256, 256))
mask_image = download_image(mask_url).resize((256, 256))

pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe.to("cuda")

image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0]
image.save("image.jpeg")
```

### Visual

_Visualization of VAE performance on a 512x512 image with runwayml/stable-diffusion-inpainting_

<p align="center">
<br>original image, masked image, mask
<br><b>runwayml/stable-diffusion-inpainting original VAE</b>
<br><b>stabilityai/sd-vae-ft-mse VAE</b>
<br><b>Asymmetric Autoencoder KL x1.5 VAE</b>
<br><b>Asymmetric Autoencoder KL x2 VAE</b>
</p>
<p align="center">
<img src=https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5/resolve/main/compare.jpeg width="50%"/>
</p>
3,485
[ [ -0.033203125, -0.0244140625, 0.00852203369140625, 0.0271453857421875, -0.0300445556640625, -0.0003554821014404297, -0.0078277587890625, -0.023468017578125, 0.0278472900390625, 0.0279083251953125, -0.0423583984375, -0.0251922607421875, -0.053131103515625, 0.0...
obi/deid_bert_i2b2
2022-08-22T13:28:40.000Z
[ "transformers", "pytorch", "bert", "token-classification", "deidentification", "medical notes", "ehr", "phi", "en", "dataset:I2B2", "arxiv:1904.03323", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
obi
null
null
obi/deid_bert_i2b2
7
1,919
transformers
2022-03-02T23:29:05
--- language: - en thumbnail: "https://www.onebraveidea.org/wp-content/uploads/2019/07/OBI-Logo-Website.png" tags: - deidentification - medical notes - ehr - phi datasets: - I2B2 metrics: - F1 - Recall - AUC widget: - text: "Physician Discharge Summary Admit date: 10/12/1982 Discharge date: 10/22/1982 Patient Information Jack Reacher, 54 y.o. male (DOB = 1/21/1928)." - text: "Home Address: 123 Park Drive, San Diego, CA, 03245. Home Phone: 202-555-0199 (home)." - text: "Hospital Care Team Service: Orthopedics Inpatient Attending: Roger C Kelly, MD Attending phys phone: (634)743-5135 Discharge Unit: HCS843 Primary Care Physician: Hassan V Kim, MD 512-832-5025." license: mit --- # Model Description * A ClinicalBERT [[Alsentzer et al., 2019]](https://arxiv.org/pdf/1904.03323.pdf) model fine-tuned for de-identification of medical notes. * Sequence Labeling (token classification): The model was trained to predict protected health information (PHI/PII) entities (spans). A list of protected health information categories is given by [HIPAA](https://www.hhs.gov/hipaa/for-professionals/privacy/laws-regulations/index.html). * A token can either be classified as non-PHI or as one of the 11 PHI types. Token predictions are aggregated to spans by making use of BILOU tagging. * The PHI labels that were used for training and other details can be found here: [Annotation Guidelines](https://github.com/obi-ml-public/ehr_deidentification/blob/master/AnnotationGuidelines.md) * More details on how to use this model, the format of data and other useful information is present in the GitHub repo: [Robust DeID](https://github.com/obi-ml-public/ehr_deidentification). # How to use * A demo on how the model works (using model predictions to de-identify a medical note) is on this space: [Medical-Note-Deidentification](https://huggingface.co/spaces/obi/Medical-Note-Deidentification). * Steps on how this model can be used to run a forward pass can be found here: [Forward Pass](https://github.com/obi-ml-public/ehr_deidentification/tree/master/steps/forward_pass) * In brief, the steps are: * Sentencize (the model aggregates the sentences back to the note level) and tokenize the dataset. * Use the predict function of this model to gather the predictions (i.e., predictions for each token). * Additionally, the model predictions can be used to remove PHI from the original note/text. # Dataset * The I2B2 2014 [[Stubbs and Uzuner, 2015]](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4978170/) dataset was used to train this model. | | I2B2 | | I2B2 | | | --------- | --------------------- | ---------- | -------------------- | ---------- | | | TRAIN SET - 790 NOTES | | TEST SET - 514 NOTES | | | PHI LABEL | COUNT | PERCENTAGE | COUNT | PERCENTAGE | | DATE | 7502 | 43.69 | 4980 | 44.14 | | STAFF | 3149 | 18.34 | 2004 | 17.76 | | HOSP | 1437 | 8.37 | 875 | 7.76 | | AGE | 1233 | 7.18 | 764 | 6.77 | | LOC | 1206 | 7.02 | 856 | 7.59 | | PATIENT | 1316 | 7.66 | 879 | 7.79 | | PHONE | 317 | 1.85 | 217 | 1.92 | | ID | 881 | 5.13 | 625 | 5.54 | | PATORG | 124 | 0.72 | 82 | 0.73 | | EMAIL | 4 | 0.02 | 1 | 0.01 | | OTHERPHI | 2 | 0.01 | 0 | 0 | | TOTAL | 17171 | 100 | 11283 | 100 | # Training procedure * Steps on how this model was trained can be found here: [Training](https://github.com/obi-ml-public/ehr_deidentification/tree/master/steps/train). The "model_name_or_path" was set to: "emilyalsentzer/Bio_ClinicalBERT". * The dataset was sentencized with the en_core_sci_sm sentencizer from spacy. 
* The dataset was then tokenized with a custom tokenizer built on top of the en_core_sci_sm tokenizer from spacy.
* For each sentence we added 32 tokens on the left (from previous sentences) and 32 tokens on the right (from the next sentences).
* The added tokens are not used for learning - i.e., the loss is not computed on these tokens - they are used as additional context.
* Each sequence contained a maximum of 128 tokens (including the 32 tokens added on). Longer sequences were split.
* The sentencized and tokenized dataset with the token level labels based on the BILOU notation was used to train the model.
* The model is fine-tuned from the pre-trained ClinicalBERT model mentioned above.
* Training details:
  * Input sequence length: 128
  * Batch size: 32
  * Optimizer: AdamW
  * Learning rate: 4e-5
  * Dropout: 0.1

# Results

# Questions?

Post a Github issue on the repo: [Robust DeID](https://github.com/obi-ml-public/ehr_deidentification).
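For a quick look at raw token-level PHI predictions (without the sentencization and aggregation pipeline from the repo, which the authors recommend for real use), here is a minimal sketch with the generic token-classification pipeline:

```python
from transformers import pipeline

# Minimal sketch: raw PHI predictions; the Robust DeID repo's forward-pass
# scripts handle sentencization and BILOU span aggregation more carefully.
deid = pipeline(
    "token-classification",
    model="obi/deid_bert_i2b2",
    aggregation_strategy="simple",  # merge token tags into entity spans
)

note = "Patient Jack Reacher, 54 y.o. male, admitted on 10/12/1982."
for entity in deid(note):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```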
5,265
[ [ -0.0021686553955078125, -0.06201171875, 0.035614013671875, -0.013427734375, -0.0254669189453125, -0.0170135498046875, 0.00354766845703125, -0.033172607421875, 0.019775390625, 0.041595458984375, -0.041748046875, -0.0634765625, -0.05224609375, 0.00044393539428...
Panchal009/my-robots-cvb
2023-10-09T10:35:48.000Z
[ "diffusers", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Panchal009
null
null
Panchal009/my-robots-cvb
0
1,919
diffusers
2023-10-09T10:30:13
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-robots-cvb Dreambooth model trained by Panchal009 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: GoX19932gAS Sample pictures of this concept: ![0](https://huggingface.co/Panchal009/my-robots-cvb/resolve/main/sample_images/cvb[2].jpeg) ![1](https://huggingface.co/Panchal009/my-robots-cvb/resolve/main/sample_images/cvb[3].jpeg) ![2](https://huggingface.co/Panchal009/my-robots-cvb/resolve/main/sample_images/cvb[1].jpeg) ![3](https://huggingface.co/Panchal009/my-robots-cvb/resolve/main/sample_images/cvb[0].jpeg) ![4](https://huggingface.co/Panchal009/my-robots-cvb/resolve/main/sample_images/cvb[4].jpeg)
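The card includes no usage snippet. Judging from the sample image filenames, "cvb" appears to be the Dreambooth concept token, so a hedged `diffusers` sketch might look like this:

```python
from diffusers import StableDiffusionPipeline

# Sketch only: "cvb" is assumed to be the Dreambooth concept token,
# inferred from the sample image filenames above.
pipe = StableDiffusionPipeline.from_pretrained("Panchal009/my-robots-cvb").to("cuda")

image = pipe("a photo of cvb robot").images[0]
image.save("cvb_robot.png")
```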
808
[ [ -0.060302734375, -0.029266357421875, 0.0291748046875, 0.0181427001953125, -0.0164031982421875, 0.0372314453125, 0.0316162109375, -0.0237274169921875, 0.054656982421875, 0.044036865234375, -0.0604248046875, -0.037353515625, -0.030548095703125, -0.003503799438...
studio-ousia/luke-japanese-large
2022-11-09T02:18:56.000Z
[ "transformers", "pytorch", "luke", "fill-mask", "named entity recognition", "entity typing", "relation classification", "question answering", "ja", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
studio-ousia
null
null
studio-ousia/luke-japanese-large
6
1,918
transformers
2022-11-07T14:25:53
---
language: ja
thumbnail: https://github.com/studio-ousia/luke/raw/master/resources/luke_logo.png
tags:
- luke
- named entity recognition
- entity typing
- relation classification
- question answering
license: apache-2.0
---

## luke-japanese-large

**luke-japanese** is the Japanese version of **LUKE** (**L**anguage **U**nderstanding with **K**nowledge-based **E**mbeddings), a pre-trained _knowledge-enhanced_ contextualized representation of words and entities. LUKE treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Please refer to our [GitHub repository](https://github.com/studio-ousia/luke) for more details and updates.

This model contains Wikipedia entity embeddings which are not used in general NLP tasks. Please use the [lite version](https://huggingface.co/studio-ousia/luke-japanese-large-lite/) for tasks that do not use Wikipedia entities as inputs.

**luke-japanese**は、単語とエンティティの知識拡張型訓練済み Transformer モデル**LUKE**の日本語版です。LUKE は単語とエンティティを独立したトークンとして扱い、これらの文脈を考慮した表現を出力します。詳細については、[GitHub リポジトリ](https://github.com/studio-ousia/luke)を参照してください。

このモデルは、通常の NLP タスクでは使われない Wikipedia エンティティのエンベディングを含んでいます。単語の入力のみを使うタスクには、[lite version](https://huggingface.co/studio-ousia/luke-japanese-large-lite/)を使用してください。

### Experimental results on JGLUE

The experimental results evaluated on the dev set of [JGLUE](https://github.com/yahoojapan/JGLUE) are shown below:

| Model | MARC-ja | JSTS | JNLI | JCommonsenseQA |
| ----------------------------- | --------- | ------------------- | --------- | -------------- |
| | acc | Pearson/Spearman | acc | acc |
| **LUKE Japanese large** | **0.965** | **0.932**/**0.902** | **0.927** | 0.893 |
| _Baselines:_ | | | | |
| Tohoku BERT large | 0.955 | 0.913/0.872 | 0.900 | 0.816 |
| Waseda RoBERTa large (seq128) | 0.954 | 0.930/0.896 | 0.924 | **0.907** |
| Waseda RoBERTa large (seq512) | 0.961 | 0.926/0.892 | 0.926 | 0.891 |
| XLM RoBERTa large | 0.964 | 0.918/0.884 | 0.919 | 0.840 |

The baseline scores are obtained from [here](https://github.com/yahoojapan/JGLUE/blob/a6832af23895d6faec8ecf39ec925f1a91601d62/README.md).

### Citation

```latex
@inproceedings{yamada2020luke,
  title={LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention},
  author={Ikuya Yamada and Akari Asai and Hiroyuki Shindo and Hideaki Takeda and Yuji Matsumoto},
  booktitle={EMNLP},
  year={2020}
}
```
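A minimal loading sketch (entity-aware inputs are covered in the GitHub repository; this only extracts contextualized word representations, and the example sentence is ours):

```python
from transformers import AutoTokenizer, AutoModel

# Minimal sketch: encode a Japanese sentence with luke-japanese-large.
tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-japanese-large")
model = AutoModel.from_pretrained("studio-ousia/luke-japanese-large")

inputs = tokenizer("森喜朗は日本の元首相です。", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
```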
2,707
[ [ -0.0438232421875, -0.07513427734375, 0.0435791015625, -0.008758544921875, -0.01488494873046875, -0.0011920928955078125, -0.041107177734375, -0.041717529296875, 0.06268310546875, 0.0267486572265625, -0.030670166015625, -0.058746337890625, -0.048675537109375, ...
textattack/bert-base-uncased-imdb
2021-05-20T07:42:02.000Z
[ "transformers", "pytorch", "jax", "bert", "text-classification", "endpoints_compatible", "has_space", "region:us" ]
text-classification
textattack
null
null
textattack/bert-base-uncased-imdb
5
1,914
transformers
2022-03-02T23:29:05
## TextAttack Model Card This `bert-base-uncased` model was fine-tuned for sequence classification using TextAttack and the imdb dataset loaded using the `nlp` library. The model was fine-tuned for 5 epochs with a batch size of 16, a learning rate of 2e-05, and a maximum sequence length of 128. Since this was a classification task, the model was trained with a cross-entropy loss function. The best score the model achieved on this task was 0.89088, as measured by the eval set accuracy, found after 4 epochs. For more information, check out [TextAttack on Github](https://github.com/QData/TextAttack).
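The card has no usage snippet; here is a minimal sketch with the text-classification pipeline. Note that TextAttack checkpoints typically expose generic `LABEL_0`/`LABEL_1` names, which for IMDB presumably map to negative/positive — an assumption worth verifying:

```python
from transformers import pipeline

# Minimal sketch: IMDB sentiment with the fine-tuned checkpoint.
classifier = pipeline("text-classification", model="textattack/bert-base-uncased-imdb")

print(classifier("This movie was a complete waste of two hours."))
# e.g. [{'label': 'LABEL_0', 'score': ...}]
```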
612
[ [ -0.0257415771484375, -0.027618408203125, 0.0180206298828125, -0.00031065940856933594, -0.03863525390625, 0.01528167724609375, -0.00919342041015625, -0.03631591796875, -0.004718780517578125, 0.0300140380859375, -0.046112060546875, -0.045745849609375, -0.041748046...
smanjil/German-MedBERT
2022-06-13T16:52:46.000Z
[ "transformers", "pytorch", "tf", "jax", "bert", "fill-mask", "exbert", "German", "de", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
smanjil
null
null
smanjil/German-MedBERT
17
1,910
transformers
2022-03-02T23:29:05
---
language: de
tags:
- exbert
- German
---

<a href="https://huggingface.co/exbert/?model=smanjil/German-MedBERT">
<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>

# German Medical BERT

This is a model fine-tuned on the medical domain for the German language, based on German BERT. It has only been trained on the on-target task (masked language modeling) and can later be used for a downstream task of your choice; I applied it to the NTS-ICD-10 text classification task.

## Overview

**Language model:** bert-base-german-cased
**Language:** German
**Fine-tuning:** Medical articles (diseases, symptoms, therapies, etc.)
**Eval data:** NTS-ICD-10 dataset (Classification)
**Infrastructure:** Google Colab

## Details

- We fine-tuned using PyTorch with the Huggingface library on a Colab GPU, with the standard fine-tuning parameter settings from the original BERT paper.
- Classification, however, required training for up to 25 epochs.

## Performance (Micro precision, recall, and F1 score for multilabel code classification)

|Models|P|R|F1|
|:------|:------|:------|:------|
|German BERT|86.04|75.82|80.60|
|German MedBERT-256 (fine-tuned)|87.41|77.97|82.42|
|German MedBERT-512 (fine-tuned)|87.75|78.26|82.73|

## Author

Manjil Shrestha: `shresthamanjil21 [at] gmail.com`

## Related Paper: [Report](https://opus4.kobv.de/opus4-rhein-waal/frontdoor/index/index/searchtype/collection/id/16225/start/0/rows/10/doctypefq/masterthesis/docId/740)

Get in touch: [LinkedIn](https://www.linkedin.com/in/manjil-shrestha-038527b4/)
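Since the model was trained with the masked-language-model objective, a minimal fill-mask sketch (the example sentence is ours, not from the author):

```python
from transformers import pipeline

# Minimal sketch: masked-token prediction with the German medical BERT.
fill_mask = pipeline("fill-mask", model="smanjil/German-MedBERT")

for prediction in fill_mask("Der Patient klagt über starke [MASK] im Bauch."):
    print(prediction["token_str"], round(prediction["score"], 3))
```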
1,608
[ [ -0.03692626953125, -0.0482177734375, 0.0217742919921875, 0.00824737548828125, -0.019195556640625, -0.004650115966796875, -0.0292816162109375, -0.0498046875, 0.0322265625, 0.009674072265625, -0.043426513671875, -0.0511474609375, -0.051239013671875, -0.0004465...
aubmindlab/bert-large-arabertv02
2023-08-03T12:31:29.000Z
[ "transformers", "pytorch", "tf", "jax", "tensorboard", "safetensors", "bert", "fill-mask", "ar", "dataset:wikipedia", "dataset:Osian", "dataset:1.5B-Arabic-Corpus", "dataset:oscar-arabic-unshuffled", "arxiv:2003.00104", "autotrain_compatible", "endpoints_compatible", "has_space", "...
fill-mask
aubmindlab
null
null
aubmindlab/bert-large-arabertv02
4
1,906
transformers
2022-03-02T23:29:05
---
language: ar
datasets:
- wikipedia
- Osian
- 1.5B-Arabic-Corpus
- oscar-arabic-unshuffled
widget:
- text: " عاصمة لبنان هي [MASK] ."
---

# AraBERT v1 & v2 : Pre-training BERT for Arabic Language Understanding

<img src="https://raw.githubusercontent.com/aub-mind/arabert/master/arabert_logo.png" width="100" align="left"/>

**AraBERT** is an Arabic pretrained language model based on [Google's BERT architecture](https://github.com/google-research/bert). AraBERT uses the same BERT-Base config. More details are available in the [AraBERT Paper](https://arxiv.org/abs/2003.00104) and in the [AraBERT Meetup](https://github.com/WissamAntoun/pydata_khobar_meetup)

There are two versions of the model, AraBERTv0.1 and AraBERTv1, with the difference being that AraBERTv1 uses pre-segmented text where prefixes and suffixes were split using the [Farasa Segmenter](http://alt.qcri.org/farasa/segmenter.html).

We evaluate AraBERT models on different downstream tasks and compare them to [mBERT](https://github.com/google-research/bert/blob/master/multilingual.md), and other state of the art models (*To the extent of our knowledge*). The Tasks were Sentiment Analysis on 6 different datasets ([HARD](https://github.com/elnagara/HARD-Arabic-Dataset), [ASTD-Balanced](https://www.aclweb.org/anthology/D15-1299), [ArsenTD-Lev](https://staff.aub.edu.lb/~we07/Publications/ArSentD-LEV_Sentiment_Corpus.pdf), [LABR](https://github.com/mohamedadaly/LABR)), Named Entity Recognition with the [ANERcorp](http://curtis.ml.cmu.edu/w/courses/index.php/ANERcorp), and Arabic Question Answering on [Arabic-SQuAD and ARCD](https://github.com/husseinmozannar/SOQAL)

# AraBERTv2

## What's New!

AraBERT now comes in 4 new variants to replace the old v1 versions: More details in the AraBERT folder, in the [README](https://github.com/aub-mind/arabert/blob/master/AraBERT/README.md), and in the [AraBERT Paper](https://arxiv.org/abs/2003.00104v2)

Model | HuggingFace Model Name | Size (MB/Params) | Pre-Segmentation | DataSet (Sentences/Size/nWords) |
---|:---:|:---:|:---:|:---:
AraBERTv0.2-base | [bert-base-arabertv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) | 543MB / 136M | No | 200M / 77GB / 8.6B |
AraBERTv0.2-large | [bert-large-arabertv02](https://huggingface.co/aubmindlab/bert-large-arabertv02) | 1.38G / 371M | No | 200M / 77GB / 8.6B |
AraBERTv2-base | [bert-base-arabertv2](https://huggingface.co/aubmindlab/bert-base-arabertv2) | 543MB / 136M | Yes | 200M / 77GB / 8.6B |
AraBERTv2-large | [bert-large-arabertv2](https://huggingface.co/aubmindlab/bert-large-arabertv2) | 1.38G / 371M | Yes | 200M / 77GB / 8.6B |
AraBERTv0.1-base | [bert-base-arabertv01](https://huggingface.co/aubmindlab/bert-base-arabertv01) | 543MB / 136M | No | 77M / 23GB / 2.7B |
AraBERTv1-base | [bert-base-arabert](https://huggingface.co/aubmindlab/bert-base-arabert) | 543MB / 136M | Yes | 77M / 23GB / 2.7B |

All models are available in the `HuggingFace` model page under the [aubmindlab](https://huggingface.co/aubmindlab/) name. Checkpoints are available in PyTorch, TF2 and TF1 formats.

## Better Pre-Processing and New Vocab

We identified an issue with AraBERTv1's wordpiece vocabulary. The issue came from punctuation marks and numbers that were still attached to words when the wordpiece vocabulary was learned. We now insert a space between numbers and characters and around punctuation characters.

The new vocabulary was learnt using the `BertWordpieceTokenizer` from the `tokenizers` library, and should now support the Fast tokenizer implementation from the `transformers` library.
**P.S.**: All the old BERT code should work with the new BERT, just change the model name and check the new preprocessing function. **Please read the section on how to use the [preprocessing function](#Preprocessing)**

## Bigger Dataset and More Compute

We used ~3.5 times more data, and trained for longer. For Dataset Sources see the [Dataset Section](#Dataset)

Model | Hardware | num of examples with seq len (128 / 512) | 128 (Batch Size/ Num of Steps) | 512 (Batch Size/ Num of Steps) | Total Steps | Total Time (in Days) |
---|:---:|:---:|:---:|:---:|:---:|:---:
AraBERTv0.2-base | TPUv3-8 | 420M / 207M | 2560 / 1M | 384 / 2M | 3M | -
AraBERTv0.2-large | TPUv3-128 | 420M / 207M | 13440 / 250K | 2056 / 300K | 550K | 7
AraBERTv2-base | TPUv3-8 | 420M / 207M | 2560 / 1M | 384 / 2M | 3M | -
AraBERTv2-large | TPUv3-128 | 520M / 245M | 13440 / 250K | 2056 / 300K | 550K | 7
AraBERT-base (v1/v0.1) | TPUv2-8 | - | 512 / 900K | 128 / 300K | 1.2M | 4

# Dataset

The pretraining data used for the new AraBERT model is also used for Arabic **GPT2 and ELECTRA**.

The dataset consists of 77GB or 200,095,961 lines or 8,655,948,860 words or 82,232,988,358 chars (before applying Farasa Segmentation)

For the new dataset we added the unshuffled OSCAR corpus, after thoroughly filtering it, to the previous dataset used in AraBERTv1, but without the websites that we previously crawled:
- OSCAR unshuffled and filtered.
- [Arabic Wikipedia dump](https://archive.org/details/arwiki-20190201) from 2020/09/01
- [The 1.5B words Arabic Corpus](https://www.semanticscholar.org/paper/1.5-billion-words-Arabic-Corpus-El-Khair/f3eeef4afb81223df96575adadf808fe7fe440b4)
- [The OSIAN Corpus](https://www.aclweb.org/anthology/W19-4619)
- Assafir news articles. Huge thank you to Assafir for giving us the data

# Preprocessing

It is recommended to apply our preprocessing function before training/testing on any dataset.

**Install farasapy to segment text for AraBERT v1 & v2: `pip install farasapy`**

```python
from arabert.preprocess import ArabertPreprocessor

model_name="bert-large-arabertv02"
arabert_prep = ArabertPreprocessor(model_name=model_name)

text = "ولن نبالغ إذا قلنا إن هاتف أو كمبيوتر المكتب في زمننا هذا ضروري"
arabert_prep.preprocess(text)
```

## Accepted_models

```
bert-base-arabertv01
bert-base-arabert
bert-base-arabertv02
bert-base-arabertv2
bert-large-arabertv02
bert-large-arabertv2
araelectra-base
aragpt2-base
aragpt2-medium
aragpt2-large
aragpt2-mega
```

# TensorFlow 1.x models

The TF1.x models are available in the HuggingFace models repo. You can download them as follows:

- via git-lfs: clone all the models in a repo

```bash
curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
sudo apt-get install git-lfs
git lfs install
git clone https://huggingface.co/aubmindlab/MODEL_NAME
tar -C ./MODEL_NAME -zxvf /content/MODEL_NAME/tf1_model.tar.gz
```

where `MODEL_NAME` is any model under the `aubmindlab` name

- via `wget`:
  - Go to the tf1_model.tar.gz file on huggingface.co/models/aubmindlab/MODEL_NAME.
  - Copy the `oid sha256`
  - Then run `wget https://cdn-lfs.huggingface.co/aubmindlab/aragpt2-base/INSERT_THE_SHA_HERE` (ex: for `aragpt2-base`: `wget https://cdn-lfs.huggingface.co/aubmindlab/aragpt2-base/3766fc03d7c2593ff2fb991d275e96b81b0ecb2098b71ff315611d052ce65248`)

# If you used this model please cite us as:

Google Scholar has our Bibtex wrong (missing name), use this instead:

```
@inproceedings{antoun2020arabert,
  title={AraBERT: Transformer-based Model for Arabic Language Understanding},
  author={Antoun, Wissam and Baly, Fady and Hajj, Hazem},
  booktitle={LREC 2020 Workshop Language Resources and Evaluation Conference 11--16 May 2020},
  pages={9}
}
```

# Acknowledgments

Thanks to TensorFlow Research Cloud (TFRC) for the free access to Cloud TPUs, couldn't have done it without this program, and to the [AUB MIND Lab](https://sites.aub.edu.lb/mindlab/) Members for the continuous support. Also thanks to [Yakshof](https://www.yakshof.com/#/) and Assafir for data and storage access. Another thanks to Habib Rahal (https://www.behance.net/rahalhabib), for putting a face to AraBERT.

# Contacts

**Wissam Antoun**: [Linkedin](https://www.linkedin.com/in/wissam-antoun-622142b4/) | [Twitter](https://twitter.com/wissam_antoun) | [Github](https://github.com/WissamAntoun) | <wfa07@mail.aub.edu> | <wissam.antoun@gmail.com>

**Fady Baly**: [Linkedin](https://www.linkedin.com/in/fadybaly/) | [Twitter](https://twitter.com/fadybaly) | [Github](https://github.com/fadybaly) | <fgb06@mail.aub.edu> | <baly.fady@gmail.com>
8,193
[ [ -0.052825927734375, -0.052978515625, 0.025115966796875, 0.00972747802734375, -0.02252197265625, -0.00643157958984375, -0.0107574462890625, -0.04327392578125, 0.018951416015625, 0.021728515625, -0.04351806640625, -0.05023193359375, -0.05859375, 0.000145912170...
microsoft/swin-base-patch4-window12-384-in22k
2022-05-16T18:01:06.000Z
[ "transformers", "pytorch", "tf", "swin", "image-classification", "vision", "dataset:imagenet-21k", "arxiv:2103.14030", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
microsoft
null
null
microsoft/swin-base-patch4-window12-384-in22k
0
1,906
transformers
2022-03-02T23:29:05
---
license: apache-2.0
tags:
- vision
- image-classification
datasets:
- imagenet-21k
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
  example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
  example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
  example_title: Palace
---

# Swin Transformer (base-sized model)

Swin Transformer model pre-trained on ImageNet-21k (14 million images, 21,841 classes) at resolution 384x384. It was introduced in the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Liu et al. and first released in [this repository](https://github.com/microsoft/Swin-Transformer).

Disclaimer: The team releasing Swin Transformer did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

The Swin Transformer is a type of Vision Transformer. It builds hierarchical feature maps by merging image patches (shown in gray) in deeper layers and has linear computation complexity to input image size due to computation of self-attention only within each local window (shown in red). It can thus serve as a general-purpose backbone for both image classification and dense recognition tasks. In contrast, previous vision Transformers produce feature maps of a single low resolution and have quadratic computation complexity to input image size due to computation of self-attention globally.

![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/swin_transformer_architecture.png)

[Source](https://paperswithcode.com/method/swin-transformer)

## Intended uses & limitations

You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=swin) to look for fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 21,841 ImageNet-21k classes:

```python
from transformers import AutoFeatureExtractor, SwinForImageClassification
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-base-patch4-window12-384-in22k")
model = SwinForImageClassification.from_pretrained("microsoft/swin-base-patch4-window12-384-in22k")

inputs = feature_extractor(images=image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
# model predicts one of the 21,841 ImageNet-21k classes
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```

For more code examples, we refer to the [documentation](https://huggingface.co/transformers/model_doc/swin.html#).
### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2103-14030, author = {Ze Liu and Yutong Lin and Yue Cao and Han Hu and Yixuan Wei and Zheng Zhang and Stephen Lin and Baining Guo}, title = {Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, journal = {CoRR}, volume = {abs/2103.14030}, year = {2021}, url = {https://arxiv.org/abs/2103.14030}, eprinttype = {arXiv}, eprint = {2103.14030}, timestamp = {Thu, 08 Apr 2021 07:53:26 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2103-14030.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
3,751
[ [ -0.049285888671875, -0.027130126953125, -0.01045989990234375, 0.012420654296875, -0.005611419677734375, -0.021484375, -0.0026187896728515625, -0.061737060546875, 0.00478363037109375, 0.0234832763671875, -0.0404052734375, -0.01364898681640625, -0.0439453125, ...
hackathon-pln-es/bertin-roberta-base-zeroshot-esnli
2023-04-13T08:45:26.000Z
[ "transformers", "pytorch", "safetensors", "roberta", "text-classification", "zero-shot-classification", "nli", "es", "dataset:hackathon-pln-es/nli-es", "endpoints_compatible", "region:us" ]
zero-shot-classification
hackathon-pln-es
null
null
hackathon-pln-es/bertin-roberta-base-zeroshot-esnli
3
1,906
transformers
2022-04-04T15:05:03
---
pipeline_tag: zero-shot-classification
tags:
- zero-shot-classification
- nli
language:
- es
datasets:
- hackathon-pln-es/nli-es
widget:
- text: "Para detener la pandemia, es importante que todos se presenten a vacunarse."
  candidate_labels: "salud, deporte, entretenimiento"
---

# A zero-shot classifier based on bertin-roberta-base-spanish

This model was trained on the basis of the model `bertin-roberta-base-spanish` using a **Cross encoder** for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label, so it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.

You can use it with Hugging Face's Zero-shot pipeline to make **zero-shot classifications**. Given a sentence and an arbitrary set of labels/topics, it will output the likelihood of the sentence belonging to each of the topics.

## Usage (HuggingFace Transformers)

The simplest way to use the model is the huggingface transformers pipeline tool. Just initialize the pipeline specifying the task as "zero-shot-classification" and select "hackathon-pln-es/bertin-roberta-base-zeroshot-esnli" as model.

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification",
                      model="hackathon-pln-es/bertin-roberta-base-zeroshot-esnli")

classifier(
    "El autor se perfila, a los 50 años de su muerte, como uno de los grandes de su siglo",
    candidate_labels=["cultura", "sociedad", "economia", "salud", "deportes"],
    hypothesis_template="Esta oración es sobre {}."
)
```

The `hypothesis_template` parameter is important and should be in Spanish. **In the widget on the right, this parameter is set to its default value: "This example is {}.", so different results are expected.**

## Training

We used [sentence-transformers](https://www.SBERT.net) to train the model.

**Dataset**

We used a collection of Natural Language Inference datasets as training data:
- [ESXNLI](https://raw.githubusercontent.com/artetxem/esxnli/master/esxnli.tsv), only the Spanish part
- [SNLI](https://nlp.stanford.edu/projects/snli/), automatically translated
- [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/), automatically translated

The whole dataset used is available [here](https://huggingface.co/datasets/hackathon-pln-es/nli-es).

## Authors

- [Anibal Pérez](https://huggingface.co/Anarpego)
- [Emilio Tomás Ariza](https://huggingface.co/medardodt)
- [Lautaro Gesuelli Pinto](https://huggingface.co/Lautaro)
- [Mauricio Mazuecos](https://huggingface.co/mmazuecos)
2,537
[ [ -0.024444580078125, -0.05419921875, 0.031402587890625, 0.0117645263671875, 0.00855255126953125, -0.00911712646484375, -0.01373291015625, -0.031585693359375, 0.039398193359375, 0.0273895263671875, -0.055908203125, -0.045379638671875, -0.049957275390625, 0.020...
timm/resnext50_32x4d.a1h_in1k
2023-04-05T18:57:20.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2110.00476", "arxiv:1611.05431", "arxiv:1512.03385", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/resnext50_32x4d.a1h_in1k
0
1,905
timm
2023-04-05T18:56:55
---
tags:
- image-classification
- timm
library_tag: timm
license: apache-2.0
---

# Model card for resnext50_32x4d.a1h_in1k

A ResNeXt-B image classification model. This model features:
* ReLU activations
* single layer 7x7 convolution with pooling
* 1x1 convolution shortcut downsample
* grouped 3x3 bottleneck convolutions

Trained on ImageNet-1k in `timm` using the recipe template described below.

Recipe details:
* Based on [ResNet Strikes Back](https://arxiv.org/abs/2110.00476) `A1` recipe
* LAMB optimizer
* Stronger dropout, stochastic depth, and RandAugment than paper `A1` recipe
* Cosine LR schedule with warmup

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 25.0
  - GMACs: 4.3
  - Activations (M): 14.4
  - Image size: train = 224 x 224, test = 288 x 288
- **Papers:**
  - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476
  - Aggregated Residual Transformations for Deep Neural Networks: https://arxiv.org/abs/1611.05431
  - Deep Residual Learning for Image Recognition: https://arxiv.org/abs/1512.03385
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for the softmax/topk call below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('resnext50_32x4d.a1h_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnext50_32x4d.a1h_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 112, 112])
    #  torch.Size([1, 256, 56, 56])
    #  torch.Size([1, 512, 28, 28])
    #  torch.Size([1, 1024, 14, 14])
    #  torch.Size([1, 2048, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnext50_32x4d.a1h_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a
(1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). |model |img_size|top1 |top5 |param_count|gmacs|macts|img/sec| |------------------------------------------|--------|-----|-----|-----------|-----|-----|-------| |[seresnextaa101d_32x8d.sw_in12k_ft_in1k_288](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288)|320 |86.72|98.17|93.6 |35.2 |69.7 |451 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k_288](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288)|288 |86.51|98.08|93.6 |28.5 |56.4 |560 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k)|288 |86.49|98.03|93.6 |28.5 |56.4 |557 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k)|224 |85.96|97.82|93.6 |17.2 |34.2 |923 | |[resnext101_32x32d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x32d.fb_wsl_ig1b_ft_in1k)|224 |85.11|97.44|468.5 |87.3 |91.1 |254 | |[resnetrs420.tf_in1k](https://huggingface.co/timm/resnetrs420.tf_in1k)|416 |85.0 |97.12|191.9 |108.4|213.8|134 | |[ecaresnet269d.ra2_in1k](https://huggingface.co/timm/ecaresnet269d.ra2_in1k)|352 |84.96|97.22|102.1 |50.2 |101.2|291 | |[ecaresnet269d.ra2_in1k](https://huggingface.co/timm/ecaresnet269d.ra2_in1k)|320 |84.73|97.18|102.1 |41.5 |83.7 |353 | |[resnetrs350.tf_in1k](https://huggingface.co/timm/resnetrs350.tf_in1k)|384 |84.71|96.99|164.0 |77.6 |154.7|183 | |[seresnextaa101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.ah_in1k)|288 |84.57|97.08|93.6 |28.5 |56.4 |557 | |[resnetrs200.tf_in1k](https://huggingface.co/timm/resnetrs200.tf_in1k)|320 |84.45|97.08|93.2 |31.5 |67.8 |446 | |[resnetrs270.tf_in1k](https://huggingface.co/timm/resnetrs270.tf_in1k)|352 |84.43|96.97|129.9 |51.1 |105.5|280 | |[seresnext101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101d_32x8d.ah_in1k)|288 |84.36|96.92|93.6 |27.6 |53.0 |595 | |[seresnet152d.ra2_in1k](https://huggingface.co/timm/seresnet152d.ra2_in1k)|320 |84.35|97.04|66.8 |24.1 |47.7 |610 | |[resnetrs350.tf_in1k](https://huggingface.co/timm/resnetrs350.tf_in1k)|288 |84.3 |96.94|164.0 |43.7 |87.1 |333 | |[resnext101_32x8d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_swsl_ig1b_ft_in1k)|224 |84.28|97.17|88.8 |16.5 |31.2 |1100 | |[resnetrs420.tf_in1k](https://huggingface.co/timm/resnetrs420.tf_in1k)|320 |84.24|96.86|191.9 |64.2 |126.6|228 | |[seresnext101_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101_32x8d.ah_in1k)|288 |84.19|96.87|93.6 |27.2 |51.6 |613 | |[resnext101_32x16d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_wsl_ig1b_ft_in1k)|224 |84.18|97.19|194.0 |36.3 |51.2 |581 | |[resnetaa101d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa101d.sw_in12k_ft_in1k)|288 |84.11|97.11|44.6 |15.1 |29.0 |1144 | |[resnet200d.ra2_in1k](https://huggingface.co/timm/resnet200d.ra2_in1k)|320 |83.97|96.82|64.7 |31.2 |67.3 |518 | |[resnetrs200.tf_in1k](https://huggingface.co/timm/resnetrs200.tf_in1k)|256 |83.87|96.75|93.2 |20.2 |43.4 |692 | |[seresnextaa101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.ah_in1k)|224 |83.86|96.65|93.6 |17.2 |34.2 |923 | |[resnetrs152.tf_in1k](https://huggingface.co/timm/resnetrs152.tf_in1k)|320 |83.72|96.61|86.6 |24.3 
|48.1 |617 | |[seresnet152d.ra2_in1k](https://huggingface.co/timm/seresnet152d.ra2_in1k)|256 |83.69|96.78|66.8 |15.4 |30.6 |943 | |[seresnext101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101d_32x8d.ah_in1k)|224 |83.68|96.61|93.6 |16.7 |32.0 |986 | |[resnet152d.ra2_in1k](https://huggingface.co/timm/resnet152d.ra2_in1k)|320 |83.67|96.74|60.2 |24.1 |47.7 |706 | |[resnetrs270.tf_in1k](https://huggingface.co/timm/resnetrs270.tf_in1k)|256 |83.59|96.61|129.9 |27.1 |55.8 |526 | |[seresnext101_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101_32x8d.ah_in1k)|224 |83.58|96.4 |93.6 |16.5 |31.2 |1013 | |[resnetaa101d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa101d.sw_in12k_ft_in1k)|224 |83.54|96.83|44.6 |9.1 |17.6 |1864 | |[resnet152.a1h_in1k](https://huggingface.co/timm/resnet152.a1h_in1k)|288 |83.46|96.54|60.2 |19.1 |37.3 |904 | |[resnext101_32x16d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_swsl_ig1b_ft_in1k)|224 |83.35|96.85|194.0 |36.3 |51.2 |582 | |[resnet200d.ra2_in1k](https://huggingface.co/timm/resnet200d.ra2_in1k)|256 |83.23|96.53|64.7 |20.0 |43.1 |809 | |[resnext101_32x4d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x4d.fb_swsl_ig1b_ft_in1k)|224 |83.22|96.75|44.2 |8.0 |21.2 |1814 | |[resnext101_64x4d.c1_in1k](https://huggingface.co/timm/resnext101_64x4d.c1_in1k)|288 |83.16|96.38|83.5 |25.7 |51.6 |590 | |[resnet152d.ra2_in1k](https://huggingface.co/timm/resnet152d.ra2_in1k)|256 |83.14|96.38|60.2 |15.4 |30.5 |1096 | |[resnet101d.ra2_in1k](https://huggingface.co/timm/resnet101d.ra2_in1k)|320 |83.02|96.45|44.6 |16.5 |34.8 |992 | |[ecaresnet101d.miil_in1k](https://huggingface.co/timm/ecaresnet101d.miil_in1k)|288 |82.98|96.54|44.6 |13.4 |28.2 |1077 | |[resnext101_64x4d.tv_in1k](https://huggingface.co/timm/resnext101_64x4d.tv_in1k)|224 |82.98|96.25|83.5 |15.5 |31.2 |989 | |[resnetrs152.tf_in1k](https://huggingface.co/timm/resnetrs152.tf_in1k)|256 |82.86|96.28|86.6 |15.6 |30.8 |951 | |[resnext101_32x8d.tv2_in1k](https://huggingface.co/timm/resnext101_32x8d.tv2_in1k)|224 |82.83|96.22|88.8 |16.5 |31.2 |1099 | |[resnet152.a1h_in1k](https://huggingface.co/timm/resnet152.a1h_in1k)|224 |82.8 |96.13|60.2 |11.6 |22.6 |1486 | |[resnet101.a1h_in1k](https://huggingface.co/timm/resnet101.a1h_in1k)|288 |82.8 |96.32|44.6 |13.0 |26.8 |1291 | |[resnet152.a1_in1k](https://huggingface.co/timm/resnet152.a1_in1k)|288 |82.74|95.71|60.2 |19.1 |37.3 |905 | |[resnext101_32x8d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_wsl_ig1b_ft_in1k)|224 |82.69|96.63|88.8 |16.5 |31.2 |1100 | |[resnet152.a2_in1k](https://huggingface.co/timm/resnet152.a2_in1k)|288 |82.62|95.75|60.2 |19.1 |37.3 |904 | |[resnetaa50d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa50d.sw_in12k_ft_in1k)|288 |82.61|96.49|25.6 |8.9 |20.6 |1729 | |[resnet61q.ra2_in1k](https://huggingface.co/timm/resnet61q.ra2_in1k)|288 |82.53|96.13|36.8 |9.9 |21.5 |1773 | |[wide_resnet101_2.tv2_in1k](https://huggingface.co/timm/wide_resnet101_2.tv2_in1k)|224 |82.5 |96.02|126.9 |22.8 |21.2 |1078 | |[resnext101_64x4d.c1_in1k](https://huggingface.co/timm/resnext101_64x4d.c1_in1k)|224 |82.46|95.92|83.5 |15.5 |31.2 |987 | |[resnet51q.ra2_in1k](https://huggingface.co/timm/resnet51q.ra2_in1k)|288 |82.36|96.18|35.7 |8.1 |20.9 |1964 | |[ecaresnet50t.ra2_in1k](https://huggingface.co/timm/ecaresnet50t.ra2_in1k)|320 |82.35|96.14|25.6 |8.8 |24.1 |1386 | |[resnet101.a1_in1k](https://huggingface.co/timm/resnet101.a1_in1k)|288 |82.31|95.63|44.6 |13.0 |26.8 |1291 | 
|[resnetrs101.tf_in1k](https://huggingface.co/timm/resnetrs101.tf_in1k)|288 |82.29|96.01|63.6 |13.6 |28.5 |1078 | |[resnet152.tv2_in1k](https://huggingface.co/timm/resnet152.tv2_in1k)|224 |82.29|96.0 |60.2 |11.6 |22.6 |1484 | |[wide_resnet50_2.racm_in1k](https://huggingface.co/timm/wide_resnet50_2.racm_in1k)|288 |82.27|96.06|68.9 |18.9 |23.8 |1176 | |[resnet101d.ra2_in1k](https://huggingface.co/timm/resnet101d.ra2_in1k)|256 |82.26|96.07|44.6 |10.6 |22.2 |1542 | |[resnet101.a2_in1k](https://huggingface.co/timm/resnet101.a2_in1k)|288 |82.24|95.73|44.6 |13.0 |26.8 |1290 | |[seresnext50_32x4d.racm_in1k](https://huggingface.co/timm/seresnext50_32x4d.racm_in1k)|288 |82.2 |96.14|27.6 |7.0 |23.8 |1547 | |[ecaresnet101d.miil_in1k](https://huggingface.co/timm/ecaresnet101d.miil_in1k)|224 |82.18|96.05|44.6 |8.1 |17.1 |1771 | |[resnext50_32x4d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext50_32x4d.fb_swsl_ig1b_ft_in1k)|224 |82.17|96.22|25.0 |4.3 |14.4 |2943 | |[ecaresnet50t.a1_in1k](https://huggingface.co/timm/ecaresnet50t.a1_in1k)|288 |82.12|95.65|25.6 |7.1 |19.6 |1704 | |[resnext50_32x4d.a1h_in1k](https://huggingface.co/timm/resnext50_32x4d.a1h_in1k)|288 |82.03|95.94|25.0 |7.0 |23.8 |1745 | |[ecaresnet101d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet101d_pruned.miil_in1k)|288 |82.0 |96.15|24.9 |5.8 |12.7 |1787 | |[resnet61q.ra2_in1k](https://huggingface.co/timm/resnet61q.ra2_in1k)|256 |81.99|95.85|36.8 |7.8 |17.0 |2230 | |[resnext101_32x8d.tv2_in1k](https://huggingface.co/timm/resnext101_32x8d.tv2_in1k)|176 |81.98|95.72|88.8 |10.3 |19.4 |1768 | |[resnet152.a1_in1k](https://huggingface.co/timm/resnet152.a1_in1k)|224 |81.97|95.24|60.2 |11.6 |22.6 |1486 | |[resnet101.a1h_in1k](https://huggingface.co/timm/resnet101.a1h_in1k)|224 |81.93|95.75|44.6 |7.8 |16.2 |2122 | |[resnet101.tv2_in1k](https://huggingface.co/timm/resnet101.tv2_in1k)|224 |81.9 |95.77|44.6 |7.8 |16.2 |2118 | |[resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k)|224 |81.84|96.1 |194.0 |36.3 |51.2 |583 | |[resnet51q.ra2_in1k](https://huggingface.co/timm/resnet51q.ra2_in1k)|256 |81.78|95.94|35.7 |6.4 |16.6 |2471 | |[resnet152.a2_in1k](https://huggingface.co/timm/resnet152.a2_in1k)|224 |81.77|95.22|60.2 |11.6 |22.6 |1485 | |[resnetaa50d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa50d.sw_in12k_ft_in1k)|224 |81.74|96.06|25.6 |5.4 |12.4 |2813 | |[ecaresnet50t.a2_in1k](https://huggingface.co/timm/ecaresnet50t.a2_in1k)|288 |81.65|95.54|25.6 |7.1 |19.6 |1703 | |[ecaresnet50d.miil_in1k](https://huggingface.co/timm/ecaresnet50d.miil_in1k)|288 |81.64|95.88|25.6 |7.2 |19.7 |1694 | |[resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k)|224 |81.62|96.04|88.8 |16.5 |31.2 |1101 | |[wide_resnet50_2.tv2_in1k](https://huggingface.co/timm/wide_resnet50_2.tv2_in1k)|224 |81.61|95.76|68.9 |11.4 |14.4 |1930 | |[resnetaa50.a1h_in1k](https://huggingface.co/timm/resnetaa50.a1h_in1k)|288 |81.61|95.83|25.6 |8.5 |19.2 |1868 | |[resnet101.a1_in1k](https://huggingface.co/timm/resnet101.a1_in1k)|224 |81.5 |95.16|44.6 |7.8 |16.2 |2125 | |[resnext50_32x4d.a1_in1k](https://huggingface.co/timm/resnext50_32x4d.a1_in1k)|288 |81.48|95.16|25.0 |7.0 |23.8 |1745 | |[gcresnet50t.ra2_in1k](https://huggingface.co/timm/gcresnet50t.ra2_in1k)|288 |81.47|95.71|25.9 |6.9 |18.6 |2071 | |[wide_resnet50_2.racm_in1k](https://huggingface.co/timm/wide_resnet50_2.racm_in1k)|224 |81.45|95.53|68.9 |11.4 |14.4 |1929 | 
|[resnet50d.a1_in1k](https://huggingface.co/timm/resnet50d.a1_in1k)|288 |81.44|95.22|25.6 |7.2 |19.7 |1908 | |[ecaresnet50t.ra2_in1k](https://huggingface.co/timm/ecaresnet50t.ra2_in1k)|256 |81.44|95.67|25.6 |5.6 |15.4 |2168 | |[ecaresnetlight.miil_in1k](https://huggingface.co/timm/ecaresnetlight.miil_in1k)|288 |81.4 |95.82|30.2 |6.8 |13.9 |2132 | |[resnet50d.ra2_in1k](https://huggingface.co/timm/resnet50d.ra2_in1k)|288 |81.37|95.74|25.6 |7.2 |19.7 |1910 | |[resnet101.a2_in1k](https://huggingface.co/timm/resnet101.a2_in1k)|224 |81.32|95.19|44.6 |7.8 |16.2 |2125 | |[seresnet50.ra2_in1k](https://huggingface.co/timm/seresnet50.ra2_in1k)|288 |81.3 |95.65|28.1 |6.8 |18.4 |1803 | |[resnext50_32x4d.a2_in1k](https://huggingface.co/timm/resnext50_32x4d.a2_in1k)|288 |81.3 |95.11|25.0 |7.0 |23.8 |1746 | |[seresnext50_32x4d.racm_in1k](https://huggingface.co/timm/seresnext50_32x4d.racm_in1k)|224 |81.27|95.62|27.6 |4.3 |14.4 |2591 | |[ecaresnet50t.a1_in1k](https://huggingface.co/timm/ecaresnet50t.a1_in1k)|224 |81.26|95.16|25.6 |4.3 |11.8 |2823 | |[gcresnext50ts.ch_in1k](https://huggingface.co/timm/gcresnext50ts.ch_in1k)|288 |81.23|95.54|15.7 |4.8 |19.6 |2117 | |[senet154.gluon_in1k](https://huggingface.co/timm/senet154.gluon_in1k)|224 |81.23|95.35|115.1 |20.8 |38.7 |545 | |[resnet50.a1_in1k](https://huggingface.co/timm/resnet50.a1_in1k)|288 |81.22|95.11|25.6 |6.8 |18.4 |2089 | |[resnet50_gn.a1h_in1k](https://huggingface.co/timm/resnet50_gn.a1h_in1k)|288 |81.22|95.63|25.6 |6.8 |18.4 |676 | |[resnet50d.a2_in1k](https://huggingface.co/timm/resnet50d.a2_in1k)|288 |81.18|95.09|25.6 |7.2 |19.7 |1908 | |[resnet50.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnet50.fb_swsl_ig1b_ft_in1k)|224 |81.18|95.98|25.6 |4.1 |11.1 |3455 | |[resnext50_32x4d.tv2_in1k](https://huggingface.co/timm/resnext50_32x4d.tv2_in1k)|224 |81.17|95.34|25.0 |4.3 |14.4 |2933 | |[resnext50_32x4d.a1h_in1k](https://huggingface.co/timm/resnext50_32x4d.a1h_in1k)|224 |81.1 |95.33|25.0 |4.3 |14.4 |2934 | |[seresnet50.a2_in1k](https://huggingface.co/timm/seresnet50.a2_in1k)|288 |81.1 |95.23|28.1 |6.8 |18.4 |1801 | |[seresnet50.a1_in1k](https://huggingface.co/timm/seresnet50.a1_in1k)|288 |81.1 |95.12|28.1 |6.8 |18.4 |1799 | |[resnet152s.gluon_in1k](https://huggingface.co/timm/resnet152s.gluon_in1k)|224 |81.02|95.41|60.3 |12.9 |25.0 |1347 | |[resnet50.d_in1k](https://huggingface.co/timm/resnet50.d_in1k)|288 |80.97|95.44|25.6 |6.8 |18.4 |2085 | |[gcresnet50t.ra2_in1k](https://huggingface.co/timm/gcresnet50t.ra2_in1k)|256 |80.94|95.45|25.9 |5.4 |14.7 |2571 | |[resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k)|224 |80.93|95.73|44.2 |8.0 |21.2 |1814 | |[resnet50.c1_in1k](https://huggingface.co/timm/resnet50.c1_in1k)|288 |80.91|95.55|25.6 |6.8 |18.4 |2084 | |[seresnext101_32x4d.gluon_in1k](https://huggingface.co/timm/seresnext101_32x4d.gluon_in1k)|224 |80.9 |95.31|49.0 |8.0 |21.3 |1585 | |[seresnext101_64x4d.gluon_in1k](https://huggingface.co/timm/seresnext101_64x4d.gluon_in1k)|224 |80.9 |95.3 |88.2 |15.5 |31.2 |918 | |[resnet50.c2_in1k](https://huggingface.co/timm/resnet50.c2_in1k)|288 |80.86|95.52|25.6 |6.8 |18.4 |2085 | |[resnet50.tv2_in1k](https://huggingface.co/timm/resnet50.tv2_in1k)|224 |80.85|95.43|25.6 |4.1 |11.1 |3450 | |[ecaresnet50t.a2_in1k](https://huggingface.co/timm/ecaresnet50t.a2_in1k)|224 |80.84|95.02|25.6 |4.3 |11.8 |2821 | |[ecaresnet101d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet101d_pruned.miil_in1k)|224 |80.79|95.62|24.9 |3.5 |7.7 |2961 | 
|[seresnet33ts.ra2_in1k](https://huggingface.co/timm/seresnet33ts.ra2_in1k)|288 |80.79|95.36|19.8 |6.0 |14.8 |2506 | |[ecaresnet50d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet50d_pruned.miil_in1k)|288 |80.79|95.58|19.9 |4.2 |10.6 |2349 | |[resnet50.a2_in1k](https://huggingface.co/timm/resnet50.a2_in1k)|288 |80.78|94.99|25.6 |6.8 |18.4 |2088 | |[resnet50.b1k_in1k](https://huggingface.co/timm/resnet50.b1k_in1k)|288 |80.71|95.43|25.6 |6.8 |18.4 |2087 | |[resnext50_32x4d.ra_in1k](https://huggingface.co/timm/resnext50_32x4d.ra_in1k)|288 |80.7 |95.39|25.0 |7.0 |23.8 |1749 | |[resnetrs101.tf_in1k](https://huggingface.co/timm/resnetrs101.tf_in1k)|192 |80.69|95.24|63.6 |6.0 |12.7 |2270 | |[resnet50d.a1_in1k](https://huggingface.co/timm/resnet50d.a1_in1k)|224 |80.68|94.71|25.6 |4.4 |11.9 |3162 | |[eca_resnet33ts.ra2_in1k](https://huggingface.co/timm/eca_resnet33ts.ra2_in1k)|288 |80.68|95.36|19.7 |6.0 |14.8 |2637 | |[resnet50.a1h_in1k](https://huggingface.co/timm/resnet50.a1h_in1k)|224 |80.67|95.3 |25.6 |4.1 |11.1 |3452 | |[resnext50d_32x4d.bt_in1k](https://huggingface.co/timm/resnext50d_32x4d.bt_in1k)|288 |80.67|95.42|25.0 |7.4 |25.1 |1626 | |[resnetaa50.a1h_in1k](https://huggingface.co/timm/resnetaa50.a1h_in1k)|224 |80.63|95.21|25.6 |5.2 |11.6 |3034 | |[ecaresnet50d.miil_in1k](https://huggingface.co/timm/ecaresnet50d.miil_in1k)|224 |80.61|95.32|25.6 |4.4 |11.9 |2813 | |[resnext101_64x4d.gluon_in1k](https://huggingface.co/timm/resnext101_64x4d.gluon_in1k)|224 |80.61|94.99|83.5 |15.5 |31.2 |989 | |[gcresnet33ts.ra2_in1k](https://huggingface.co/timm/gcresnet33ts.ra2_in1k)|288 |80.6 |95.31|19.9 |6.0 |14.8 |2578 | |[gcresnext50ts.ch_in1k](https://huggingface.co/timm/gcresnext50ts.ch_in1k)|256 |80.57|95.17|15.7 |3.8 |15.5 |2710 | |[resnet152.a3_in1k](https://huggingface.co/timm/resnet152.a3_in1k)|224 |80.56|95.0 |60.2 |11.6 |22.6 |1483 | |[resnet50d.ra2_in1k](https://huggingface.co/timm/resnet50d.ra2_in1k)|224 |80.53|95.16|25.6 |4.4 |11.9 |3164 | |[resnext50_32x4d.a1_in1k](https://huggingface.co/timm/resnext50_32x4d.a1_in1k)|224 |80.53|94.46|25.0 |4.3 |14.4 |2930 | |[wide_resnet101_2.tv2_in1k](https://huggingface.co/timm/wide_resnet101_2.tv2_in1k)|176 |80.48|94.98|126.9 |14.3 |13.2 |1719 | |[resnet152d.gluon_in1k](https://huggingface.co/timm/resnet152d.gluon_in1k)|224 |80.47|95.2 |60.2 |11.8 |23.4 |1428 | |[resnet50.b2k_in1k](https://huggingface.co/timm/resnet50.b2k_in1k)|288 |80.45|95.32|25.6 |6.8 |18.4 |2086 | |[ecaresnetlight.miil_in1k](https://huggingface.co/timm/ecaresnetlight.miil_in1k)|224 |80.45|95.24|30.2 |4.1 |8.4 |3530 | |[resnext50_32x4d.a2_in1k](https://huggingface.co/timm/resnext50_32x4d.a2_in1k)|224 |80.45|94.63|25.0 |4.3 |14.4 |2936 | |[wide_resnet50_2.tv2_in1k](https://huggingface.co/timm/wide_resnet50_2.tv2_in1k)|176 |80.43|95.09|68.9 |7.3 |9.0 |3015 | |[resnet101d.gluon_in1k](https://huggingface.co/timm/resnet101d.gluon_in1k)|224 |80.42|95.01|44.6 |8.1 |17.0 |2007 | |[resnet50.a1_in1k](https://huggingface.co/timm/resnet50.a1_in1k)|224 |80.38|94.6 |25.6 |4.1 |11.1 |3461 | |[seresnet33ts.ra2_in1k](https://huggingface.co/timm/seresnet33ts.ra2_in1k)|256 |80.36|95.1 |19.8 |4.8 |11.7 |3267 | |[resnext101_32x4d.gluon_in1k](https://huggingface.co/timm/resnext101_32x4d.gluon_in1k)|224 |80.34|94.93|44.2 |8.0 |21.2 |1814 | |[resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k)|224 |80.32|95.4 |25.0 |4.3 |14.4 |2941 | |[resnet101s.gluon_in1k](https://huggingface.co/timm/resnet101s.gluon_in1k)|224 |80.28|95.16|44.7 |9.2 |18.6 
|1851 | |[seresnet50.ra2_in1k](https://huggingface.co/timm/seresnet50.ra2_in1k)|224 |80.26|95.08|28.1 |4.1 |11.1 |2972 | |[resnetblur50.bt_in1k](https://huggingface.co/timm/resnetblur50.bt_in1k)|288 |80.24|95.24|25.6 |8.5 |19.9 |1523 | |[resnet50d.a2_in1k](https://huggingface.co/timm/resnet50d.a2_in1k)|224 |80.22|94.63|25.6 |4.4 |11.9 |3162 | |[resnet152.tv2_in1k](https://huggingface.co/timm/resnet152.tv2_in1k)|176 |80.2 |94.64|60.2 |7.2 |14.0 |2346 | |[seresnet50.a2_in1k](https://huggingface.co/timm/seresnet50.a2_in1k)|224 |80.08|94.74|28.1 |4.1 |11.1 |2969 | |[eca_resnet33ts.ra2_in1k](https://huggingface.co/timm/eca_resnet33ts.ra2_in1k)|256 |80.08|94.97|19.7 |4.8 |11.7 |3284 | |[gcresnet33ts.ra2_in1k](https://huggingface.co/timm/gcresnet33ts.ra2_in1k)|256 |80.06|94.99|19.9 |4.8 |11.7 |3216 | |[resnet50_gn.a1h_in1k](https://huggingface.co/timm/resnet50_gn.a1h_in1k)|224 |80.06|94.95|25.6 |4.1 |11.1 |1109 | |[seresnet50.a1_in1k](https://huggingface.co/timm/seresnet50.a1_in1k)|224 |80.02|94.71|28.1 |4.1 |11.1 |2962 | |[resnet50.ram_in1k](https://huggingface.co/timm/resnet50.ram_in1k)|288 |79.97|95.05|25.6 |6.8 |18.4 |2086 | |[resnet152c.gluon_in1k](https://huggingface.co/timm/resnet152c.gluon_in1k)|224 |79.92|94.84|60.2 |11.8 |23.4 |1455 | |[seresnext50_32x4d.gluon_in1k](https://huggingface.co/timm/seresnext50_32x4d.gluon_in1k)|224 |79.91|94.82|27.6 |4.3 |14.4 |2591 | |[resnet50.d_in1k](https://huggingface.co/timm/resnet50.d_in1k)|224 |79.91|94.67|25.6 |4.1 |11.1 |3456 | |[resnet101.tv2_in1k](https://huggingface.co/timm/resnet101.tv2_in1k)|176 |79.9 |94.6 |44.6 |4.9 |10.1 |3341 | |[resnetrs50.tf_in1k](https://huggingface.co/timm/resnetrs50.tf_in1k)|224 |79.89|94.97|35.7 |4.5 |12.1 |2774 | |[resnet50.c2_in1k](https://huggingface.co/timm/resnet50.c2_in1k)|224 |79.88|94.87|25.6 |4.1 |11.1 |3455 | |[ecaresnet26t.ra2_in1k](https://huggingface.co/timm/ecaresnet26t.ra2_in1k)|320 |79.86|95.07|16.0 |5.2 |16.4 |2168 | |[resnet50.a2_in1k](https://huggingface.co/timm/resnet50.a2_in1k)|224 |79.85|94.56|25.6 |4.1 |11.1 |3460 | |[resnet50.ra_in1k](https://huggingface.co/timm/resnet50.ra_in1k)|288 |79.83|94.97|25.6 |6.8 |18.4 |2087 | |[resnet101.a3_in1k](https://huggingface.co/timm/resnet101.a3_in1k)|224 |79.82|94.62|44.6 |7.8 |16.2 |2114 | |[resnext50_32x4d.ra_in1k](https://huggingface.co/timm/resnext50_32x4d.ra_in1k)|224 |79.76|94.6 |25.0 |4.3 |14.4 |2943 | |[resnet50.c1_in1k](https://huggingface.co/timm/resnet50.c1_in1k)|224 |79.74|94.95|25.6 |4.1 |11.1 |3455 | |[ecaresnet50d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet50d_pruned.miil_in1k)|224 |79.74|94.87|19.9 |2.5 |6.4 |3929 | |[resnet33ts.ra2_in1k](https://huggingface.co/timm/resnet33ts.ra2_in1k)|288 |79.71|94.83|19.7 |6.0 |14.8 |2710 | |[resnet152.gluon_in1k](https://huggingface.co/timm/resnet152.gluon_in1k)|224 |79.68|94.74|60.2 |11.6 |22.6 |1486 | |[resnext50d_32x4d.bt_in1k](https://huggingface.co/timm/resnext50d_32x4d.bt_in1k)|224 |79.67|94.87|25.0 |4.5 |15.2 |2729 | |[resnet50.bt_in1k](https://huggingface.co/timm/resnet50.bt_in1k)|288 |79.63|94.91|25.6 |6.8 |18.4 |2086 | |[ecaresnet50t.a3_in1k](https://huggingface.co/timm/ecaresnet50t.a3_in1k)|224 |79.56|94.72|25.6 |4.3 |11.8 |2805 | |[resnet101c.gluon_in1k](https://huggingface.co/timm/resnet101c.gluon_in1k)|224 |79.53|94.58|44.6 |8.1 |17.0 |2062 | |[resnet50.b1k_in1k](https://huggingface.co/timm/resnet50.b1k_in1k)|224 |79.52|94.61|25.6 |4.1 |11.1 |3459 | |[resnet50.tv2_in1k](https://huggingface.co/timm/resnet50.tv2_in1k)|176 |79.42|94.64|25.6 |2.6 |6.9 |5397 | 
|[resnet32ts.ra2_in1k](https://huggingface.co/timm/resnet32ts.ra2_in1k)|288 |79.4 |94.66|18.0 |5.9 |14.6 |2752 | |[resnet50.b2k_in1k](https://huggingface.co/timm/resnet50.b2k_in1k)|224 |79.38|94.57|25.6 |4.1 |11.1 |3459 | |[resnext50_32x4d.tv2_in1k](https://huggingface.co/timm/resnext50_32x4d.tv2_in1k)|176 |79.37|94.3 |25.0 |2.7 |9.0 |4577 | |[resnext50_32x4d.gluon_in1k](https://huggingface.co/timm/resnext50_32x4d.gluon_in1k)|224 |79.36|94.43|25.0 |4.3 |14.4 |2942 | |[resnext101_32x8d.tv_in1k](https://huggingface.co/timm/resnext101_32x8d.tv_in1k)|224 |79.31|94.52|88.8 |16.5 |31.2 |1100 | |[resnet101.gluon_in1k](https://huggingface.co/timm/resnet101.gluon_in1k)|224 |79.31|94.53|44.6 |7.8 |16.2 |2125 | |[resnetblur50.bt_in1k](https://huggingface.co/timm/resnetblur50.bt_in1k)|224 |79.31|94.63|25.6 |5.2 |12.0 |2524 | |[resnet50.a1h_in1k](https://huggingface.co/timm/resnet50.a1h_in1k)|176 |79.27|94.49|25.6 |2.6 |6.9 |5404 | |[resnext50_32x4d.a3_in1k](https://huggingface.co/timm/resnext50_32x4d.a3_in1k)|224 |79.25|94.31|25.0 |4.3 |14.4 |2931 | |[resnet50.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnet50.fb_ssl_yfcc100m_ft_in1k)|224 |79.22|94.84|25.6 |4.1 |11.1 |3451 | |[resnet33ts.ra2_in1k](https://huggingface.co/timm/resnet33ts.ra2_in1k)|256 |79.21|94.56|19.7 |4.8 |11.7 |3392 | |[resnet50d.gluon_in1k](https://huggingface.co/timm/resnet50d.gluon_in1k)|224 |79.07|94.48|25.6 |4.4 |11.9 |3162 | |[resnet50.ram_in1k](https://huggingface.co/timm/resnet50.ram_in1k)|224 |79.03|94.38|25.6 |4.1 |11.1 |3453 | |[resnet50.am_in1k](https://huggingface.co/timm/resnet50.am_in1k)|224 |79.01|94.39|25.6 |4.1 |11.1 |3461 | |[resnet32ts.ra2_in1k](https://huggingface.co/timm/resnet32ts.ra2_in1k)|256 |79.01|94.37|18.0 |4.6 |11.6 |3440 | |[ecaresnet26t.ra2_in1k](https://huggingface.co/timm/ecaresnet26t.ra2_in1k)|256 |78.9 |94.54|16.0 |3.4 |10.5 |3421 | |[resnet152.a3_in1k](https://huggingface.co/timm/resnet152.a3_in1k)|160 |78.89|94.11|60.2 |5.9 |11.5 |2745 | |[wide_resnet101_2.tv_in1k](https://huggingface.co/timm/wide_resnet101_2.tv_in1k)|224 |78.84|94.28|126.9 |22.8 |21.2 |1079 | |[seresnext26d_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26d_32x4d.bt_in1k)|288 |78.83|94.24|16.8 |4.5 |16.8 |2251 | |[resnet50.ra_in1k](https://huggingface.co/timm/resnet50.ra_in1k)|224 |78.81|94.32|25.6 |4.1 |11.1 |3454 | |[seresnext26t_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26t_32x4d.bt_in1k)|288 |78.74|94.33|16.8 |4.5 |16.7 |2264 | |[resnet50s.gluon_in1k](https://huggingface.co/timm/resnet50s.gluon_in1k)|224 |78.72|94.23|25.7 |5.5 |13.5 |2796 | |[resnet50d.a3_in1k](https://huggingface.co/timm/resnet50d.a3_in1k)|224 |78.71|94.24|25.6 |4.4 |11.9 |3154 | |[wide_resnet50_2.tv_in1k](https://huggingface.co/timm/wide_resnet50_2.tv_in1k)|224 |78.47|94.09|68.9 |11.4 |14.4 |1934 | |[resnet50.bt_in1k](https://huggingface.co/timm/resnet50.bt_in1k)|224 |78.46|94.27|25.6 |4.1 |11.1 |3454 | |[resnet34d.ra2_in1k](https://huggingface.co/timm/resnet34d.ra2_in1k)|288 |78.43|94.35|21.8 |6.5 |7.5 |3291 | |[gcresnext26ts.ch_in1k](https://huggingface.co/timm/gcresnext26ts.ch_in1k)|288 |78.42|94.04|10.5 |3.1 |13.3 |3226 | |[resnet26t.ra2_in1k](https://huggingface.co/timm/resnet26t.ra2_in1k)|320 |78.33|94.13|16.0 |5.2 |16.4 |2391 | |[resnet152.tv_in1k](https://huggingface.co/timm/resnet152.tv_in1k)|224 |78.32|94.04|60.2 |11.6 |22.6 |1487 | |[seresnext26ts.ch_in1k](https://huggingface.co/timm/seresnext26ts.ch_in1k)|288 |78.28|94.1 |10.4 |3.1 |13.3 |3062 | 
|[bat_resnext26ts.ch_in1k](https://huggingface.co/timm/bat_resnext26ts.ch_in1k)|256 |78.25|94.1 |10.7 |2.5 |12.5 |3393 | |[resnet50.a3_in1k](https://huggingface.co/timm/resnet50.a3_in1k)|224 |78.06|93.78|25.6 |4.1 |11.1 |3450 | |[resnet50c.gluon_in1k](https://huggingface.co/timm/resnet50c.gluon_in1k)|224 |78.0 |93.99|25.6 |4.4 |11.9 |3286 | |[eca_resnext26ts.ch_in1k](https://huggingface.co/timm/eca_resnext26ts.ch_in1k)|288 |78.0 |93.91|10.3 |3.1 |13.3 |3297 | |[seresnext26t_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26t_32x4d.bt_in1k)|224 |77.98|93.75|16.8 |2.7 |10.1 |3841 | |[resnet34.a1_in1k](https://huggingface.co/timm/resnet34.a1_in1k)|288 |77.92|93.77|21.8 |6.1 |6.2 |3609 | |[resnet101.a3_in1k](https://huggingface.co/timm/resnet101.a3_in1k)|160 |77.88|93.71|44.6 |4.0 |8.3 |3926 | |[resnet26t.ra2_in1k](https://huggingface.co/timm/resnet26t.ra2_in1k)|256 |77.87|93.84|16.0 |3.4 |10.5 |3772 | |[seresnext26ts.ch_in1k](https://huggingface.co/timm/seresnext26ts.ch_in1k)|256 |77.86|93.79|10.4 |2.4 |10.5 |4263 | |[resnetrs50.tf_in1k](https://huggingface.co/timm/resnetrs50.tf_in1k)|160 |77.82|93.81|35.7 |2.3 |6.2 |5238 | |[gcresnext26ts.ch_in1k](https://huggingface.co/timm/gcresnext26ts.ch_in1k)|256 |77.81|93.82|10.5 |2.4 |10.5 |4183 | |[ecaresnet50t.a3_in1k](https://huggingface.co/timm/ecaresnet50t.a3_in1k)|160 |77.79|93.6 |25.6 |2.2 |6.0 |5329 | |[resnext50_32x4d.a3_in1k](https://huggingface.co/timm/resnext50_32x4d.a3_in1k)|160 |77.73|93.32|25.0 |2.2 |7.4 |5576 | |[resnext50_32x4d.tv_in1k](https://huggingface.co/timm/resnext50_32x4d.tv_in1k)|224 |77.61|93.7 |25.0 |4.3 |14.4 |2944 | |[seresnext26d_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26d_32x4d.bt_in1k)|224 |77.59|93.61|16.8 |2.7 |10.2 |3807 | |[resnet50.gluon_in1k](https://huggingface.co/timm/resnet50.gluon_in1k)|224 |77.58|93.72|25.6 |4.1 |11.1 |3455 | |[eca_resnext26ts.ch_in1k](https://huggingface.co/timm/eca_resnext26ts.ch_in1k)|256 |77.44|93.56|10.3 |2.4 |10.5 |4284 | |[resnet26d.bt_in1k](https://huggingface.co/timm/resnet26d.bt_in1k)|288 |77.41|93.63|16.0 |4.3 |13.5 |2907 | |[resnet101.tv_in1k](https://huggingface.co/timm/resnet101.tv_in1k)|224 |77.38|93.54|44.6 |7.8 |16.2 |2125 | |[resnet50d.a3_in1k](https://huggingface.co/timm/resnet50d.a3_in1k)|160 |77.22|93.27|25.6 |2.2 |6.1 |5982 | |[resnext26ts.ra2_in1k](https://huggingface.co/timm/resnext26ts.ra2_in1k)|288 |77.17|93.47|10.3 |3.1 |13.3 |3392 | |[resnet34.a2_in1k](https://huggingface.co/timm/resnet34.a2_in1k)|288 |77.15|93.27|21.8 |6.1 |6.2 |3615 | |[resnet34d.ra2_in1k](https://huggingface.co/timm/resnet34d.ra2_in1k)|224 |77.1 |93.37|21.8 |3.9 |4.5 |5436 | |[seresnet50.a3_in1k](https://huggingface.co/timm/seresnet50.a3_in1k)|224 |77.02|93.07|28.1 |4.1 |11.1 |2952 | |[resnext26ts.ra2_in1k](https://huggingface.co/timm/resnext26ts.ra2_in1k)|256 |76.78|93.13|10.3 |2.4 |10.5 |4410 | |[resnet26d.bt_in1k](https://huggingface.co/timm/resnet26d.bt_in1k)|224 |76.7 |93.17|16.0 |2.6 |8.2 |4859 | |[resnet34.bt_in1k](https://huggingface.co/timm/resnet34.bt_in1k)|288 |76.5 |93.35|21.8 |6.1 |6.2 |3617 | |[resnet34.a1_in1k](https://huggingface.co/timm/resnet34.a1_in1k)|224 |76.42|92.87|21.8 |3.7 |3.7 |5984 | |[resnet26.bt_in1k](https://huggingface.co/timm/resnet26.bt_in1k)|288 |76.35|93.18|16.0 |3.9 |12.2 |3331 | |[resnet50.tv_in1k](https://huggingface.co/timm/resnet50.tv_in1k)|224 |76.13|92.86|25.6 |4.1 |11.1 |3457 | |[resnet50.a3_in1k](https://huggingface.co/timm/resnet50.a3_in1k)|160 |75.96|92.5 |25.6 |2.1 |5.7 |6490 | 
|[resnet34.a2_in1k](https://huggingface.co/timm/resnet34.a2_in1k)|224 |75.52|92.44|21.8 |3.7 |3.7 |5991 | |[resnet26.bt_in1k](https://huggingface.co/timm/resnet26.bt_in1k)|224 |75.3 |92.58|16.0 |2.4 |7.4 |5583 | |[resnet34.bt_in1k](https://huggingface.co/timm/resnet34.bt_in1k)|224 |75.16|92.18|21.8 |3.7 |3.7 |5994 | |[seresnet50.a3_in1k](https://huggingface.co/timm/seresnet50.a3_in1k)|160 |75.1 |92.08|28.1 |2.1 |5.7 |5513 | |[resnet34.gluon_in1k](https://huggingface.co/timm/resnet34.gluon_in1k)|224 |74.57|91.98|21.8 |3.7 |3.7 |5984 | |[resnet18d.ra2_in1k](https://huggingface.co/timm/resnet18d.ra2_in1k)|288 |73.81|91.83|11.7 |3.4 |5.4 |5196 | |[resnet34.tv_in1k](https://huggingface.co/timm/resnet34.tv_in1k)|224 |73.32|91.42|21.8 |3.7 |3.7 |5979 | |[resnet18.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnet18.fb_swsl_ig1b_ft_in1k)|224 |73.28|91.73|11.7 |1.8 |2.5 |10213 | |[resnet18.a1_in1k](https://huggingface.co/timm/resnet18.a1_in1k)|288 |73.16|91.03|11.7 |3.0 |4.1 |6050 | |[resnet34.a3_in1k](https://huggingface.co/timm/resnet34.a3_in1k)|224 |72.98|91.11|21.8 |3.7 |3.7 |5967 | |[resnet18.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnet18.fb_ssl_yfcc100m_ft_in1k)|224 |72.6 |91.42|11.7 |1.8 |2.5 |10213 | |[resnet18.a2_in1k](https://huggingface.co/timm/resnet18.a2_in1k)|288 |72.37|90.59|11.7 |3.0 |4.1 |6051 | |[resnet14t.c3_in1k](https://huggingface.co/timm/resnet14t.c3_in1k)|224 |72.26|90.31|10.1 |1.7 |5.8 |7026 | |[resnet18d.ra2_in1k](https://huggingface.co/timm/resnet18d.ra2_in1k)|224 |72.26|90.68|11.7 |2.1 |3.3 |8707 | |[resnet18.a1_in1k](https://huggingface.co/timm/resnet18.a1_in1k)|224 |71.49|90.07|11.7 |1.8 |2.5 |10187 | |[resnet14t.c3_in1k](https://huggingface.co/timm/resnet14t.c3_in1k)|176 |71.31|89.69|10.1 |1.1 |3.6 |10970 | |[resnet18.gluon_in1k](https://huggingface.co/timm/resnet18.gluon_in1k)|224 |70.84|89.76|11.7 |1.8 |2.5 |10210 | |[resnet18.a2_in1k](https://huggingface.co/timm/resnet18.a2_in1k)|224 |70.64|89.47|11.7 |1.8 |2.5 |10194 | |[resnet34.a3_in1k](https://huggingface.co/timm/resnet34.a3_in1k)|160 |70.56|89.52|21.8 |1.9 |1.9 |10737 | |[resnet18.tv_in1k](https://huggingface.co/timm/resnet18.tv_in1k)|224 |69.76|89.07|11.7 |1.8 |2.5 |10205 | |[resnet10t.c3_in1k](https://huggingface.co/timm/resnet10t.c3_in1k)|224 |68.34|88.03|5.4 |1.1 |2.4 |13079 | |[resnet18.a3_in1k](https://huggingface.co/timm/resnet18.a3_in1k)|224 |68.25|88.17|11.7 |1.8 |2.5 |10167 | |[resnet10t.c3_in1k](https://huggingface.co/timm/resnet10t.c3_in1k)|176 |66.71|86.96|5.4 |0.7 |1.5 |20327 | |[resnet18.a3_in1k](https://huggingface.co/timm/resnet18.a3_in1k)|160 |65.66|86.26|11.7 |0.9 |1.3 |18229 | ## Citation ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{Xie2016, title={Aggregated Residual Transformations for Deep Neural Networks}, author={Saining Xie and Ross Girshick and Piotr Dollár and Zhuowen Tu and Kaiming He}, journal={arXiv preprint arXiv:1611.05431}, year={2016} } ``` ```bibtex @article{He2015, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning 
for Image Recognition}, journal = {arXiv preprint arXiv:1512.03385}, year = {2015} } ```
38,940
[ [ -0.06365966796875, -0.0171051025390625, 0.004123687744140625, 0.0276336669921875, -0.03106689453125, -0.008758544921875, -0.01047515869140625, -0.0299835205078125, 0.08123779296875, 0.0216217041015625, -0.04864501953125, -0.041290283203125, -0.046295166015625, ...
JainSanyam/my-beautiful-cat
2023-10-08T18:54:14.000Z
[ "diffusers", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
JainSanyam
null
null
JainSanyam/my-beautiful-cat
0
1,903
diffusers
2023-10-08T18:49:16
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-Beautiful-Cat Dreambooth model trained by JainSanyam following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: VCE-14 Sample pictures of this concept: ![0](https://huggingface.co/JainSanyam/my-beautiful-cat/resolve/main/sample_images/ftw_(10).jpg)
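For illustration, a minimal sketch of loading this Dreambooth checkpoint with the standard `diffusers` `StableDiffusionPipeline` API; the prompt below is a made-up example, since the card does not document the concept's instance token:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the Dreambooth checkpoint with the standard Stable Diffusion pipeline
pipe = StableDiffusionPipeline.from_pretrained(
    "JainSanyam/my-beautiful-cat",
    torch_dtype=torch.float16,
).to("cuda")

# Hypothetical prompt; substitute the instance token used during training
image = pipe("a photo of a beautiful cat sitting on a windowsill").images[0]
image.save("cat.png")
```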
402
[ [ -0.05035400390625, -0.025848388671875, 0.02294921875, 0.0181121826171875, -0.030120849609375, 0.0430908203125, 0.044036865234375, -0.0276947021484375, 0.066650390625, 0.04876708984375, -0.0482177734375, -0.01082611083984375, -0.010986328125, 0.01322937011718...
TheBloke/zephyr-7B-beta-AWQ
2023-10-27T14:56:23.000Z
[ "transformers", "safetensors", "mistral", "text-generation", "generated_from_trainer", "en", "dataset:HuggingFaceH4/ultrachat_200k", "dataset:HuggingFaceH4/ultrafeedback_binarized", "arxiv:2305.18290", "arxiv:2310.16944", "license:mit", "text-generation-inference", "region:us" ]
text-generation
TheBloke
null
null
TheBloke/zephyr-7B-beta-AWQ
12
1,903
transformers
2023-10-27T13:16:11
--- base_model: HuggingFaceH4/zephyr-7b-beta datasets: - HuggingFaceH4/ultrachat_200k - HuggingFaceH4/ultrafeedback_binarized inference: false language: - en license: mit model-index: - name: zephyr-7b-beta results: [] model_creator: Hugging Face H4 model_name: Zephyr 7B Beta model_type: mistral prompt_template: '<|system|> </s> <|user|> {prompt}</s> <|assistant|> ' quantized_by: TheBloke tags: - generated_from_trainer --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Zephyr 7B Beta - AWQ - Model creator: [Hugging Face H4](https://huggingface.co/HuggingFaceH4) - Original model: [Zephyr 7B Beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) <!-- description start --> ## Description This repo contains AWQ model files for [Hugging Face H4's Zephyr 7B Beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). ### About AWQ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with equivalent or better quality than the most commonly used GPTQ settings.
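For context, a rough sketch of how 4-bit AWQ checkpoints like these are typically produced with [AutoAWQ](https://github.com/casper-hansen/AutoAWQ). This is not TheBloke's exact recipe; the quantisation settings below simply mirror the 4-bit, group-size-128 parameters in the provided-files table further down, and the output path is hypothetical:

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = "HuggingFaceH4/zephyr-7b-beta"  # source fp16 model
quant_path = "zephyr-7b-beta-awq"            # hypothetical output directory

# 4-bit weights with group size 128, matching the provided files below
quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}

model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Runs activation-aware calibration on a small text corpus, then quantises the weights
model.quantize(tokenizer, quant_config=quant_config)

model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)
```

The resulting safetensors files can then be served by any of the backends listed next.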
It is supported by: - [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ - [vLLM](https://github.com/vllm-project/vllm) - Llama and Mistral models only - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/zephyr-7B-beta-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/zephyr-7B-beta-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF) * [Hugging Face H4's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Zephyr ``` <|system|> </s> <|user|> {prompt}</s> <|assistant|> ``` <!-- prompt-template end --> <!-- README_AWQ.md-provided-files start --> ## Provided files, and AWQ parameters For my first release of AWQ models, I am releasing 128g models only. I will consider adding 32g as well if there is interest, and once I have done perplexity and evaluation comparisons, but at this time 32g models are still not fully tested with AutoAWQ and vLLM. Models are released as sharded safetensors files. | Branch | Bits | GS | AWQ Dataset | Seq Len | Size | | ------ | ---- | -- | ----------- | ------- | ---- | | [main](https://huggingface.co/TheBloke/zephyr-7B-beta-AWQ/tree/main) | 4 | 128 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 4.15 GB <!-- README_AWQ.md-provided-files end --> <!-- README_AWQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/zephyr-7B-beta-AWQ`. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `zephyr-7B-beta-AWQ` 7. Select **Loader: AutoAWQ**. 8. Click Load, and the model will load and is now ready for use. 9. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. 10. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_AWQ.md-text-generation-webui end --> <!-- README_AWQ.md-use-from-vllm start --> ## Multi-user inference server: vLLM Documentation on installing and using vLLM [can be found here](https://vllm.readthedocs.io/en/latest/). - Please ensure you are using vLLM version 0.2 or later. - When using vLLM as a server, pass the `--quantization awq` parameter. 
For example: ```shell python3 -m vllm.entrypoints.api_server --model TheBloke/zephyr-7B-beta-AWQ --quantization awq ``` - When using vLLM from Python code, again set `quantization=awq`. For example: ```python from vllm import LLM, SamplingParams prompts = [ "Tell me about AI", "Write a story about llamas", "What is 291 - 150?", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", ] prompt_template='''<|system|> </s> <|user|> {prompt}</s> <|assistant|> ''' prompts = [prompt_template.format(prompt=prompt) for prompt in prompts] sampling_params = SamplingParams(temperature=0.8, top_p=0.95) llm = LLM(model="TheBloke/zephyr-7B-beta-AWQ", quantization="awq", dtype="auto") outputs = llm.generate(prompts, sampling_params) # Print the outputs. for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") ``` <!-- README_AWQ.md-use-from-vllm end --> <!-- README_AWQ.md-use-from-tgi start --> ## Multi-user inference server: Hugging Face Text Generation Inference (TGI) Use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0` Example Docker parameters: ```shell --model-id TheBloke/zephyr-7B-beta-AWQ --port 3000 --quantize awq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 ``` Example Python code for interfacing with TGI (requires [huggingface-hub](https://github.com/huggingface/huggingface_hub) 0.17.0 or later): ```shell pip3 install huggingface-hub ``` ```python from huggingface_hub import InferenceClient endpoint_url = "https://your-endpoint-url-here" prompt = "Tell me about AI" prompt_template=f'''<|system|> </s> <|user|> {prompt}</s> <|assistant|> ''' client = InferenceClient(endpoint_url) response = client.text_generation(prompt_template, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1) print("Model output: ", response) ``` <!-- README_AWQ.md-use-from-tgi end --> <!-- README_AWQ.md-use-from-python start --> ## Inference from Python code using AutoAWQ ### Install the AutoAWQ package Requires: [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 0.1.1 or later. ```shell pip3 install autoawq ``` If you have problems installing [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y autoawq git clone https://github.com/casper-hansen/AutoAWQ cd AutoAWQ pip3 install .
``` ### AutoAWQ example code ```python from awq import AutoAWQForCausalLM from transformers import AutoTokenizer model_name_or_path = "TheBloke/zephyr-7B-beta-AWQ" # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False) # Load model model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True, trust_remote_code=False, safetensors=True) prompt = "Tell me about AI" prompt_template=f'''<|system|> </s> <|user|> {prompt}</s> <|assistant|> ''' print("*** Running model.generate:") token_input = tokenizer( prompt_template, return_tensors='pt' ).input_ids.cuda() # Generate output generation_output = model.generate( token_input, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, max_new_tokens=512 ) # Get the tokens from the output, decode them, print them token_output = generation_output[0] text_output = tokenizer.decode(token_output) print("LLM output: ", text_output) """ # Inference should be possible with transformers pipeline as well in future # But currently this is not yet supported by AutoAWQ (correct as of September 25th 2023) from transformers import pipeline print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1 ) print(pipe(prompt_template)[0]['generated_text']) """ ``` <!-- README_AWQ.md-use-from-python end --> <!-- README_AWQ.md-compatibility start --> ## Compatibility The files provided are tested to work with: - [text-generation-webui](https://github.com/oobabooga/text-generation-webui) using `Loader: AutoAWQ`. - [vLLM](https://github.com/vllm-project/vllm) version 0.2.0 and later. - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) version 1.1.0 and later. - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) version 0.1.1 and later. <!-- README_AWQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: Hugging Face H4's Zephyr 7B Beta <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> <img src="https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png" alt="Zephyr Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Model Card for Zephyr 7B β Zephyr is a series of language models that are trained to act as helpful assistants. Zephyr-7B-β is the second model in the series, and is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) that was trained on a mix of publicly available, synthetic datasets using [Direct Preference Optimization (DPO)](https://arxiv.org/abs/2305.18290). We found that removing the in-built alignment of these datasets boosted performance on [MT Bench](https://huggingface.co/spaces/lmsys/mt-bench) and made the model more helpful. However, this means the model is likely to generate problematic text when prompted to do so and should only be used for educational and research purposes. You can find more details in the [technical report](https://arxiv.org/abs/2310.16944). ## Model description - **Model type:** A 7B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets. - **Language(s) (NLP):** Primarily English - **License:** MIT - **Finetuned from model:** [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) ### Model Sources <!-- Provide the basic links for the model.
--> - **Repository:** https://github.com/huggingface/alignment-handbook - **Demo:** https://huggingface.co/spaces/HuggingFaceH4/zephyr-chat - **Chatbot Arena:** Evaluate Zephyr 7B against 10+ LLMs in the LMSYS arena: http://arena.lmsys.org ## Performance At the time of release, Zephyr-7B-β is the highest ranked 7B chat model on the [MT-Bench](https://huggingface.co/spaces/lmsys/mt-bench) and [AlpacaEval](https://tatsu-lab.github.io/alpaca_eval/) benchmarks: | Model | Size | Alignment | MT-Bench (score) | AlpacaEval (win rate %) | |-------------|-----|----|---------------|--------------| | StableLM-Tuned-α | 7B| dSFT |2.75| -| | MPT-Chat | 7B |dSFT |5.42| -| | Xwin-LMv0.1 | 7B| dPPO| 6.19| 87.83| | Mistral-Instructv0.1 | 7B| - | 6.84 |-| | Zephyr-7b-α |7B| dDPO| 6.88| -| | **Zephyr-7b-β** 🪁 | **7B** | **dDPO** | **7.34** | **90.60** | | Falcon-Instruct | 40B |dSFT |5.17 |45.71| | Guanaco | 65B | SFT |6.41| 71.80| | Llama2-Chat | 70B |RLHF |6.86| 92.66| | Vicuna v1.3 | 33B |dSFT |7.12 |88.99| | WizardLM v1.0 | 70B |dSFT |7.71 |-| | Xwin-LM v0.1 | 70B |dPPO |- |95.57| | GPT-3.5-turbo | - |RLHF |7.94 |89.37| | Claude 2 | - |RLHF |8.06| 91.36| | GPT-4 | -| RLHF |8.99| 95.28| In particular, on several categories of MT-Bench, Zephyr-7B-β has strong performance compared to larger open models like Llama2-Chat-70B: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6200d0a443eb0913fa2df7cc/raxvt5ma16d7T23my34WC.png) However, on more complex tasks like coding and mathematics, Zephyr-7B-β lags behind proprietary models and more research is needed to close the gap. ## Intended uses & limitations The model was initially fine-tuned on a filtered and preprocessed version of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. We then further aligned the model with [🤗 TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions that are ranked by GPT-4. As a result, the model can be used for chat and you can check out our [demo](https://huggingface.co/spaces/HuggingFaceH4/zephyr-chat) to test its capabilities.
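To make that alignment step concrete, here is a minimal sketch of DPO training with TRL's `DPOTrainer`. This is not H4's actual training script (their recipe lives in the alignment-handbook repository linked above); the `beta` value is an assumption, and the preference data needs light preprocessing into plain `prompt`/`chosen`/`rejected` strings, which is omitted here:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_name = "mistralai/Mistral-7B-v0.1"
model = AutoModelForCausalLM.from_pretrained(model_name)      # policy to align
ref_model = AutoModelForCausalLM.from_pretrained(model_name)  # frozen reference
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Preference pairs; assumes the 'train_prefs' split with columns
# flattened to prompt/chosen/rejected strings
train_dataset = load_dataset("HuggingFaceH4/ultrafeedback_binarized", split="train_prefs")

# Values match the hyperparameters reported later in this card
training_args = TrainingArguments(
    output_dir="zephyr-7b-dpo",
    per_device_train_batch_size=2,
    learning_rate=5e-7,
    num_train_epochs=3,
)

trainer = DPOTrainer(
    model,
    ref_model,
    args=training_args,
    beta=0.1,  # KL penalty strength; assumed, not reported in this card
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
trainer.train()
```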
You can find the datasets used for training Zephyr-7B-β [here](https://huggingface.co/collections/HuggingFaceH4/zephyr-7b-6538c6d6d5ddd1cbb1744a66). Here's how you can run the model using the `pipeline()` function from 🤗 Transformers: ```python # Install transformers from source - only needed for versions <= v4.34 # pip install git+https://github.com/huggingface/transformers.git # pip install accelerate import torch from transformers import pipeline pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto") # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) # <|system|> # You are a friendly chatbot who always responds in the style of a pirate.</s> # <|user|> # How many helicopters can a human eat in one sitting?</s> # <|assistant|> # Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food! ``` ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Zephyr-7B-β has not been aligned to human preferences with techniques like RLHF or deployed with in-the-loop filtering of responses like ChatGPT, so the model can produce problematic outputs (especially when prompted to do so). The size and composition of the corpus used to train the base model (`mistralai/Mistral-7B-v0.1`) are also unknown; however, it is likely to have included a mix of Web data and technical sources like books and code. See the [Falcon 180B model card](https://huggingface.co/tiiuae/falcon-180B#training-data) for an example of this.
## Training and evaluation data During DPO training, this model achieves the following results on the evaluation set: - Loss: 0.7496 - Rewards/chosen: -4.5221 - Rewards/rejected: -8.3184 - Rewards/accuracies: 0.7812 - Rewards/margins: 3.7963 - Logps/rejected: -340.1541 - Logps/chosen: -299.4561 - Logits/rejected: -2.3081 - Logits/chosen: -2.3531 ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 16 - total_train_batch_size: 32 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3.0 ### Training results The table below shows the full set of DPO training metrics: | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.6284 | 0.05 | 100 | 0.6098 | 0.0425 | -0.1872 | 0.7344 | 0.2297 | -258.8416 | -253.8099 | -2.7976 | -2.8234 | | 0.4908 | 0.1 | 200 | 0.5426 | -0.0279 | -0.6842 | 0.75 | 0.6563 | -263.8124 | -254.5145 | -2.7719 | -2.7960 | | 0.5264 | 0.15 | 300 | 0.5324 | 0.0414 | -0.9793 | 0.7656 | 1.0207 | -266.7627 | -253.8209 | -2.7892 | -2.8122 | | 0.5536 | 0.21 | 400 | 0.4957 | -0.0185 | -1.5276 | 0.7969 | 1.5091 | -272.2460 | -254.4203 | -2.8542 | -2.8764 | | 0.5362 | 0.26 | 500 | 0.5031 | -0.2630 | -1.5917 | 0.7812 | 1.3287 | -272.8869 | -256.8653 | -2.8702 | -2.8958 | | 0.5966 | 0.31 | 600 | 0.5963 | -0.2993 | -1.6491 | 0.7812 | 1.3499 | -273.4614 | -257.2279 | -2.8778 | -2.8986 | | 0.5014 | 0.36 | 700 | 0.5382 | -0.2859 | -1.4750 | 0.75 | 1.1891 | -271.7204 | -257.0942 | -2.7659 | -2.7869 | | 0.5334 | 0.41 | 800 | 0.5677 | -0.4289 | -1.8968 | 0.7969 | 1.4679 | -275.9378 | -258.5242 | -2.7053 | -2.7265 | | 0.5251 | 0.46 | 900 | 0.5772 | -0.2116 | -1.3107 | 0.7344 | 1.0991 | -270.0768 | -256.3507 | -2.8463 | -2.8662 | | 0.5205 | 0.52 | 1000 | 0.5262 | -0.3792 | -1.8585 | 0.7188 | 1.4793 | -275.5552 | -258.0276 | -2.7893 | -2.7979 | | 0.5094 | 0.57 | 1100 | 0.5433 | -0.6279 | -1.9368 | 0.7969 | 1.3089 | -276.3377 | -260.5136 | -2.7453 | -2.7536 | | 0.5837 | 0.62 | 1200 | 0.5349 | -0.3780 | -1.9584 | 0.7656 | 1.5804 | -276.5542 | -258.0154 | -2.7643 | -2.7756 | | 0.5214 | 0.67 | 1300 | 0.5732 | -1.0055 | -2.2306 | 0.7656 | 1.2251 | -279.2761 | -264.2903 | -2.6986 | -2.7113 | | 0.6914 | 0.72 | 1400 | 0.5137 | -0.6912 | -2.1775 | 0.7969 | 1.4863 | -278.7448 | -261.1467 | -2.7166 | -2.7275 | | 0.4655 | 0.77 | 1500 | 0.5090 | -0.7987 | -2.2930 | 0.7031 | 1.4943 | -279.8999 | -262.2220 | -2.6651 | -2.6838 | | 0.5731 | 0.83 | 1600 | 0.5312 | -0.8253 | -2.3520 | 0.7812 | 1.5268 | -280.4902 | -262.4876 | -2.6543 | -2.6728 | | 0.5233 | 0.88 | 1700 | 0.5206 | -0.4573 | -2.0951 | 0.7812 | 1.6377 | -277.9205 | -258.8084 | -2.6870 | -2.7097 | | 0.5593 | 0.93 | 1800 | 0.5231 | -0.5508 | -2.2000 | 0.7969 | 1.6492 | -278.9703 | -259.7433 | -2.6221 | -2.6519 | | 0.4967 | 0.98 | 1900 | 0.5290 | -0.5340 | -1.9570 | 0.8281 | 1.4230 | -276.5395 | -259.5749 | -2.6564 | -2.6878 | | 0.0921 | 1.03 | 2000 | 0.5368 | -1.1376 | -3.1615 | 0.7812 | 2.0239 | -288.5854 | -265.6111 | -2.6040 | -2.6345 | | 0.0733 | 1.08 | 2100 | 0.5453 
| -1.1045 | -3.4451 | 0.7656 | 2.3406 | -291.4208 | -265.2799 | -2.6289 | -2.6595 | | 0.0972 | 1.14 | 2200 | 0.5571 | -1.6915 | -3.9823 | 0.8125 | 2.2908 | -296.7934 | -271.1505 | -2.6471 | -2.6709 | | 0.1058 | 1.19 | 2300 | 0.5789 | -1.0621 | -3.8941 | 0.7969 | 2.8319 | -295.9106 | -264.8563 | -2.5527 | -2.5798 | | 0.2423 | 1.24 | 2400 | 0.5455 | -1.1963 | -3.5590 | 0.7812 | 2.3627 | -292.5599 | -266.1981 | -2.5414 | -2.5784 | | 0.1177 | 1.29 | 2500 | 0.5889 | -1.8141 | -4.3942 | 0.7969 | 2.5801 | -300.9120 | -272.3761 | -2.4802 | -2.5189 | | 0.1213 | 1.34 | 2600 | 0.5683 | -1.4608 | -3.8420 | 0.8125 | 2.3812 | -295.3901 | -268.8436 | -2.4774 | -2.5207 | | 0.0889 | 1.39 | 2700 | 0.5890 | -1.6007 | -3.7337 | 0.7812 | 2.1330 | -294.3068 | -270.2423 | -2.4123 | -2.4522 | | 0.0995 | 1.45 | 2800 | 0.6073 | -1.5519 | -3.8362 | 0.8281 | 2.2843 | -295.3315 | -269.7538 | -2.4685 | -2.5050 | | 0.1145 | 1.5 | 2900 | 0.5790 | -1.7939 | -4.2876 | 0.8438 | 2.4937 | -299.8461 | -272.1744 | -2.4272 | -2.4674 | | 0.0644 | 1.55 | 3000 | 0.5735 | -1.7285 | -4.2051 | 0.8125 | 2.4766 | -299.0209 | -271.5201 | -2.4193 | -2.4574 | | 0.0798 | 1.6 | 3100 | 0.5537 | -1.7226 | -4.2850 | 0.8438 | 2.5624 | -299.8200 | -271.4610 | -2.5367 | -2.5696 | | 0.1013 | 1.65 | 3200 | 0.5575 | -1.5715 | -3.9813 | 0.875 | 2.4098 | -296.7825 | -269.9498 | -2.4926 | -2.5267 | | 0.1254 | 1.7 | 3300 | 0.5905 | -1.6412 | -4.4703 | 0.8594 | 2.8291 | -301.6730 | -270.6473 | -2.5017 | -2.5340 | | 0.085 | 1.76 | 3400 | 0.6133 | -1.9159 | -4.6760 | 0.8438 | 2.7601 | -303.7296 | -273.3941 | -2.4614 | -2.4960 | | 0.065 | 1.81 | 3500 | 0.6074 | -1.8237 | -4.3525 | 0.8594 | 2.5288 | -300.4951 | -272.4724 | -2.4597 | -2.5004 | | 0.0755 | 1.86 | 3600 | 0.5836 | -1.9252 | -4.4005 | 0.8125 | 2.4753 | -300.9748 | -273.4872 | -2.4327 | -2.4716 | | 0.0746 | 1.91 | 3700 | 0.5789 | -1.9280 | -4.4906 | 0.8125 | 2.5626 | -301.8762 | -273.5149 | -2.4686 | -2.5115 | | 0.1348 | 1.96 | 3800 | 0.6015 | -1.8658 | -4.2428 | 0.8281 | 2.3769 | -299.3976 | -272.8936 | -2.4943 | -2.5393 | | 0.0217 | 2.01 | 3900 | 0.6122 | -2.3335 | -4.9229 | 0.8281 | 2.5894 | -306.1988 | -277.5699 | -2.4841 | -2.5272 | | 0.0219 | 2.07 | 4000 | 0.6522 | -2.9890 | -6.0164 | 0.8281 | 3.0274 | -317.1334 | -284.1248 | -2.4105 | -2.4545 | | 0.0119 | 2.12 | 4100 | 0.6922 | -3.4777 | -6.6749 | 0.7969 | 3.1972 | -323.7187 | -289.0121 | -2.4272 | -2.4699 | | 0.0153 | 2.17 | 4200 | 0.6993 | -3.2406 | -6.6775 | 0.7969 | 3.4369 | -323.7453 | -286.6413 | -2.4047 | -2.4465 | | 0.011 | 2.22 | 4300 | 0.7178 | -3.7991 | -7.4397 | 0.7656 | 3.6406 | -331.3667 | -292.2260 | -2.3843 | -2.4290 | | 0.0072 | 2.27 | 4400 | 0.6840 | -3.3269 | -6.8021 | 0.8125 | 3.4752 | -324.9908 | -287.5042 | -2.4095 | -2.4536 | | 0.0197 | 2.32 | 4500 | 0.7013 | -3.6890 | -7.3014 | 0.8125 | 3.6124 | -329.9841 | -291.1250 | -2.4118 | -2.4543 | | 0.0182 | 2.37 | 4600 | 0.7476 | -3.8994 | -7.5366 | 0.8281 | 3.6372 | -332.3356 | -293.2291 | -2.4163 | -2.4565 | | 0.0125 | 2.43 | 4700 | 0.7199 | -4.0560 | -7.5765 | 0.8438 | 3.5204 | -332.7345 | -294.7952 | -2.3699 | -2.4100 | | 0.0082 | 2.48 | 4800 | 0.7048 | -3.6613 | -7.1356 | 0.875 | 3.4743 | -328.3255 | -290.8477 | -2.3925 | -2.4303 | | 0.0118 | 2.53 | 4900 | 0.6976 | -3.7908 | -7.3152 | 0.8125 | 3.5244 | -330.1224 | -292.1431 | -2.3633 | -2.4047 | | 0.0118 | 2.58 | 5000 | 0.7198 | -3.9049 | -7.5557 | 0.8281 | 3.6508 | -332.5271 | -293.2844 | -2.3764 | -2.4194 | | 0.006 | 2.63 | 5100 | 0.7506 | -4.2118 | -7.9149 | 0.8125 | 3.7032 | -336.1194 | -296.3530 | -2.3407 | -2.3860 | 
| 0.0143 | 2.68 | 5200 | 0.7408 | -4.2433 | -7.9802 | 0.8125 | 3.7369 | -336.7721 | -296.6682 | -2.3509 | -2.3946 | | 0.0057 | 2.74 | 5300 | 0.7552 | -4.3392 | -8.0831 | 0.7969 | 3.7439 | -337.8013 | -297.6275 | -2.3388 | -2.3842 | | 0.0138 | 2.79 | 5400 | 0.7404 | -4.2395 | -7.9762 | 0.8125 | 3.7367 | -336.7322 | -296.6304 | -2.3286 | -2.3737 | | 0.0079 | 2.84 | 5500 | 0.7525 | -4.4466 | -8.2196 | 0.7812 | 3.7731 | -339.1662 | -298.7007 | -2.3200 | -2.3641 | | 0.0077 | 2.89 | 5600 | 0.7520 | -4.5586 | -8.3485 | 0.7969 | 3.7899 | -340.4545 | -299.8206 | -2.3078 | -2.3517 | | 0.0094 | 2.94 | 5700 | 0.7527 | -4.5542 | -8.3509 | 0.7812 | 3.7967 | -340.4790 | -299.7773 | -2.3062 | -2.3510 | | 0.0054 | 2.99 | 5800 | 0.7520 | -4.5169 | -8.3079 | 0.7812 | 3.7911 | -340.0493 | -299.4038 | -2.3081 | -2.3530 | ### Framework versions - Transformers 4.35.0.dev0 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.14.0 ## Citation If you find Zephyr-7B-β useful in your work, please cite it with: ``` @misc{tunstall2023zephyr, title={Zephyr: Direct Distillation of LM Alignment}, author={Lewis Tunstall and Edward Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro von Werra and Clémentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M. Rush and Thomas Wolf}, year={2023}, eprint={2310.16944}, archivePrefix={arXiv}, primaryClass={cs.LG} } ```
32,796
[ [ -0.03302001953125, -0.06201171875, 0.0274505615234375, 0.00928497314453125, -0.01446533203125, -0.0147705078125, 0.00799560546875, -0.043701171875, 0.0057220458984375, 0.025848388671875, -0.051239013671875, -0.037200927734375, -0.0183868408203125, -0.0023689...
Daniil-plotnikov/russian-vision-v6-11
2023-10-08T14:54:32.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "ru", "en", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
Daniil-plotnikov
null
null
Daniil-plotnikov/russian-vision-v6-11
1
1,900
diffusers
2023-10-08T13:51:18
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion language: - ru - en --- ### Russian-Vision-v6.11 A Stable Diffusion model for generating images from prompts in Russian (and English).
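For illustration, a minimal usage sketch assuming the standard `diffusers` `StableDiffusionPipeline` API (consistent with the tags above); the Russian prompt is a made-up example:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Daniil-plotnikov/russian-vision-v6-11",
    torch_dtype=torch.float16,
).to("cuda")

# Hypothetical prompt in Russian: "sunset over mountains, oil painting"
image = pipe("закат над горами, картина маслом").images[0]
image.save("out.png")
```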
171
[ [ -0.011749267578125, -0.0011739730834960938, 0.0211944580078125, 0.0355224609375, -0.050933837890625, -0.0203704833984375, 0.0151214599609375, -0.01128387451171875, 0.0151214599609375, 0.044097900390625, -0.03131103515625, -0.05596923828125, -0.046356201171875, ...
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
2021-09-02T08:31:10.000Z
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "endpoints_compatible", "region:us" ]
sentence-similarity
DataikuNLP
null
null
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
0
1,899
sentence-transformers
2022-03-02T23:29:04
--- pipeline_tag: sentence-similarity license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2 **This model is a copy of [this model repository](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) from sentence-transformers at the specific commit `d66eff4d8a8598f264f166af8db67f7797164651`.** This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch # Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2') model = AutoModel.from_pretrained('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors This model was trained by [sentence-transformers](https://www.sbert.net/).
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
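For a quick illustration of the semantic-search use case mentioned above, a small sketch using `sentence_transformers.util.cos_sim`; the corpus and query sentences are made-up examples:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')

# A tiny multilingual corpus and a French query (made-up examples)
corpus = [
    "The cat sits on the mat",
    "Die Katze sitzt auf der Matte",
    "Stock markets fell sharply today",
]
query = "Un chat est assis sur un tapis"

corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Cosine similarity between the query and every corpus sentence
scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
best = int(scores.argmax())
print(corpus[best], float(scores[best]))
```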
3,983
[ [ -0.0174560546875, -0.055328369140625, 0.0288848876953125, 0.0239410400390625, -0.025390625, -0.0290069580078125, -0.014862060546875, -0.0031414031982421875, 0.0127716064453125, 0.03192138671875, -0.04193115234375, -0.0236968994140625, -0.045166015625, 0.0145...