Dataset schema. For string columns, the two numbers are the minimum and maximum value lengths; for int64 columns, the minimum and maximum values.

| Column         | Type          | Min | Max    |
|----------------|---------------|-----|--------|
| modelId        | string        | 4   | 111    |
| lastModified   | string        | 24  | 24     |
| tags           | list          |     |        |
| pipeline_tag   | string        | 5   | 30     |
| author         | string        | 2   | 34     |
| config         | null          |     |        |
| securityStatus | null          |     |        |
| id             | string        | 4   | 111    |
| likes          | int64         | 0   | 9.53k  |
| downloads      | int64         | 2   | 73.6M  |
| library_name   | string        | 2   | 84     |
| created        | timestamp[us] |     |        |
| card           | string        | 101 | 901k   |
| card_len       | int64         | 101 | 901k   |
| embeddings     | list          |     |        |
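Each record that follows holds one model's metadata plus its full README (`card`) and embedding vectors. As a minimal sketch of working with rows of this shape via the `datasets` library — where the dataset id `user/hf-model-cards` is a hypothetical placeholder, since the source dataset is not named here:

```python
# Minimal sketch, assuming the dump comes from a Hugging Face dataset.
# "user/hf-model-cards" is a hypothetical placeholder id, not the real source.
from datasets import load_dataset

ds = load_dataset("user/hf-model-cards", split="train")
for row in ds.select(range(3)):
    # 'card' holds the raw README markdown; 'embeddings' is a list of vectors.
    print(row["modelId"], row["pipeline_tag"], row["likes"], row["card_len"])
```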
qanastek/XLMRoberta-Alexa-Intents-Classification
2022-05-05T00:52:15.000Z
[ "transformers", "pytorch", "Transformers", "text-classification", "intent-classification", "multi-class-classification", "natural-language-understanding", "dataset:qanastek/MASSIVE", "license:cc-by-4.0", "endpoints_compatible", "has_space", "region:us" ]
text-classification
qanastek
null
null
qanastek/XLMRoberta-Alexa-Intents-Classification
20
687
transformers
2022-05-04T22:36:57
--- tags: - Transformers - text-classification - intent-classification - multi-class-classification - natural-language-understanding languages: - af-ZA - am-ET - ar-SA - az-AZ - bn-BD - cy-GB - da-DK - de-DE - el-GR - en-US - es-ES - fa-IR - fi-FI - fr-FR - he-IL - hi-IN - hu-HU - hy-AM - id-ID - is-IS - it-IT - ja-JP - jv-ID - ka-GE - km-KH - kn-IN - ko-KR - lv-LV - ml-IN - mn-MN - ms-MY - my-MM - nb-NO - nl-NL - pl-PL - pt-PT - ro-RO - ru-RU - sl-SL - sq-AL - sv-SE - sw-KE - ta-IN - te-IN - th-TH - tl-PH - tr-TR - ur-PK - vi-VN - zh-CN - zh-TW multilinguality: - af-ZA - am-ET - ar-SA - az-AZ - bn-BD - cy-GB - da-DK - de-DE - el-GR - en-US - es-ES - fa-IR - fi-FI - fr-FR - he-IL - hi-IN - hu-HU - hy-AM - id-ID - is-IS - it-IT - ja-JP - jv-ID - ka-GE - km-KH - kn-IN - ko-KR - lv-LV - ml-IN - mn-MN - ms-MY - my-MM - nb-NO - nl-NL - pl-PL - pt-PT - ro-RO - ru-RU - sl-SL - sq-AL - sv-SE - sw-KE - ta-IN - te-IN - th-TH - tl-PH - tr-TR - ur-PK - vi-VN - zh-CN - zh-TW datasets: - qanastek/MASSIVE widget: - text: "wake me up at five am this week" - text: "je veux écouter la chanson de jacques brel encore une fois" - text: "quiero escuchar la canción de arijit singh una vez más" - text: "olly onde é que á um parque por perto onde eu possa correr" - text: "פרק הבא בפודקאסט בבקשה" - text: "亚马逊股价" - text: "найди билет на поезд в санкт-петербург" license: cc-by-4.0 --- **People Involved** * [LABRAK Yanis](https://www.linkedin.com/in/yanis-labrak-8a7412145/) (1) **Affiliations** 1. [LIA, NLP team](https://lia.univ-avignon.fr/), Avignon University, Avignon, France. ## Demo: How to use in HuggingFace Transformers Pipeline Requires [transformers](https://pypi.org/project/transformers/): ```pip install transformers``` ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline model_name = 'qanastek/XLMRoberta-Alexa-Intents-Classification' tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSequenceClassification.from_pretrained(model_name) classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) res = classifier("réveille-moi à neuf heures du matin le vendredi") print(res) ``` Outputs: ```python [{'label': 'alarm_set', 'score': 0.9998375177383423}] ``` ## Training data [MASSIVE](https://huggingface.co/datasets/qanastek/MASSIVE) is a parallel dataset of > 1M utterances across 51 languages with annotations for the Natural Language Understanding tasks of intent prediction and slot annotation. Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions. 
## Intents * audio_volume_other * play_music * iot_hue_lighton * general_greet * calendar_set * audio_volume_down * social_query * audio_volume_mute * iot_wemo_on * iot_hue_lightup * audio_volume_up * iot_coffee * takeaway_query * qa_maths * play_game * cooking_query * iot_hue_lightdim * iot_wemo_off * music_settings * weather_query * news_query * alarm_remove * social_post * recommendation_events * transport_taxi * takeaway_order * music_query * calendar_query * lists_query * qa_currency * recommendation_movies * general_joke * recommendation_locations * email_querycontact * lists_remove * play_audiobook * email_addcontact * lists_createoradd * play_radio * qa_stock * alarm_query * email_sendemail * general_quirky * music_likeness * cooking_recipe * email_query * datetime_query * transport_traffic * play_podcasts * iot_hue_lightchange * calendar_remove * transport_query * transport_ticket * qa_factoid * iot_cleaning * alarm_set * datetime_convert * iot_hue_lightoff * qa_definition * music_dislikeness ## Evaluation results ```plain precision recall f1-score support alarm_query 0.9661 0.9037 0.9338 1734 alarm_remove 0.9484 0.9608 0.9545 1071 alarm_set 0.8611 0.9254 0.8921 2091 audio_volume_down 0.8657 0.9537 0.9075 561 audio_volume_mute 0.8608 0.9130 0.8861 1632 audio_volume_other 0.8684 0.5392 0.6653 306 audio_volume_up 0.7198 0.8446 0.7772 663 calendar_query 0.7555 0.8229 0.7878 6426 calendar_remove 0.8688 0.9441 0.9049 3417 calendar_set 0.9092 0.9014 0.9053 10659 cooking_query 0.0000 0.0000 0.0000 0 cooking_recipe 0.9282 0.8592 0.8924 3672 datetime_convert 0.8144 0.7686 0.7909 765 datetime_query 0.9152 0.9305 0.9228 4488 email_addcontact 0.6482 0.8431 0.7330 612 email_query 0.9629 0.9319 0.9472 6069 email_querycontact 0.6853 0.8032 0.7396 1326 email_sendemail 0.9530 0.9381 0.9455 5814 general_greet 0.1026 0.3922 0.1626 51 general_joke 0.9305 0.9123 0.9213 969 general_quirky 0.6984 0.5417 0.6102 8619 iot_cleaning 0.9590 0.9359 0.9473 1326 iot_coffee 0.9304 0.9749 0.9521 1836 iot_hue_lightchange 0.8794 0.9374 0.9075 1836 iot_hue_lightdim 0.8695 0.8711 0.8703 1071 iot_hue_lightoff 0.9440 0.9229 0.9334 2193 iot_hue_lighton 0.4545 0.5882 0.5128 153 iot_hue_lightup 0.9271 0.8315 0.8767 1377 iot_wemo_off 0.9615 0.8715 0.9143 918 iot_wemo_on 0.8455 0.7941 0.8190 510 lists_createoradd 0.8437 0.8356 0.8396 1989 lists_query 0.8918 0.8335 0.8617 2601 lists_remove 0.9536 0.8601 0.9044 2652 music_dislikeness 0.7725 0.7157 0.7430 204 music_likeness 0.8570 0.8159 0.8359 1836 music_query 0.8667 0.8050 0.8347 1785 music_settings 0.4024 0.3301 0.3627 306 news_query 0.8343 0.8657 0.8498 6324 play_audiobook 0.8172 0.8125 0.8149 2091 play_game 0.8666 0.8403 0.8532 1785 play_music 0.8683 0.8845 0.8763 8976 play_podcasts 0.8925 0.9125 0.9024 3213 play_radio 0.8260 0.8935 0.8585 3672 qa_currency 0.9459 0.9578 0.9518 1989 qa_definition 0.8638 0.8552 0.8595 2907 qa_factoid 0.7959 0.8178 0.8067 7191 qa_maths 0.8937 0.9302 0.9116 1275 qa_stock 0.7995 0.9412 0.8646 1326 recommendation_events 0.7646 0.7702 0.7674 2193 recommendation_locations 0.7489 0.8830 0.8104 1581 recommendation_movies 0.6907 0.7706 0.7285 1020 social_post 0.9623 0.9080 0.9344 4131 social_query 0.8104 0.7914 0.8008 1275 takeaway_order 0.7697 0.8458 0.8059 1122 takeaway_query 0.9059 0.8571 0.8808 1785 transport_query 0.8141 0.7559 0.7839 2601 transport_taxi 0.9222 0.9403 0.9312 1173 transport_ticket 0.9259 0.9384 0.9321 1785 transport_traffic 0.6919 0.9660 0.8063 765 weather_query 0.9387 0.9492 0.9439 7956 accuracy 0.8617 151674 macro avg 0.8162 
0.8273 0.8178 151674 weighted avg 0.8639 0.8617 0.8613 151674 ```
7,985
[ [ -0.04534912109375, -0.0310211181640625, 0.0218963623046875, 0.015350341796875, 0.0013723373413085938, -0.0005788803100585938, -0.002197265625, -0.0225067138671875, 0.046722412109375, 0.02056884765625, -0.052978515625, -0.05816650390625, -0.0460205078125, 0.0...
csebuetnlp/banglat5
2022-08-21T13:59:20.000Z
[ "transformers", "pytorch", "t5", "text2text-generation", "bn", "arxiv:2205.11081", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
csebuetnlp
null
null
csebuetnlp/banglat5
7
687
transformers
2022-05-23T07:51:38
--- language: - bn licenses: - cc-by-nc-sa-4.0 --- # BanglaT5 This repository contains the pretrained checkpoint of the model **BanglaT5**. This is a sequence-to-sequence transformer model pretrained with the "span corruption" objective. Finetuned models using this checkpoint achieve state-of-the-art results on many of the NLG tasks in Bengali. For finetuning on different downstream tasks such as `Machine Translation`, `Abstractive Text Summarization`, `Question Answering` etc., refer to the scripts in the official GitHub [repository](https://github.com/csebuetnlp/BanglaNLG). **Note**: This model was pretrained using a specific normalization pipeline available [here](https://github.com/csebuetnlp/normalizer). All finetuning scripts in the official GitHub repository use this normalization by default. If you need to adapt the pretrained model for a different task, make sure the text units are normalized using this pipeline before tokenizing to get the best results. A basic example is given below: ## Using this model in `transformers` (tested on 4.11.0.dev0) ```python from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from normalizer import normalize # pip install git+https://github.com/csebuetnlp/normalizer model = AutoModelForSeq2SeqLM.from_pretrained("csebuetnlp/banglat5") tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglat5", use_fast=False) input_sentence = "" input_ids = tokenizer(normalize(input_sentence), return_tensors="pt").input_ids generated_tokens = model.generate(input_ids) decoded_tokens = tokenizer.batch_decode(generated_tokens)[0] print(decoded_tokens) ``` ## Benchmarks * Supervised fine-tuning | Model | Params | MT (SacreBLEU) | TS (ROUGE-2) | QA (EM/F1) | MD (SacreBLEU-1) | NHG (ROUGE-2) | XLS (ROUGE-2) | BNLG score | |--------------------|------------|-----------------------|------------------------|-------------------|--------------------|----------------|----------------|---------------| |[mT5 (base)](https://huggingface.co/google/mt5-base) | 582M | 36.6/22.5 | 10.3 | 59.0/65.3 | 17.5 | 9.6 | 2.7/0.7 | 24.9 | |[XLM-ProphetNet](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased) | 616M | 23.3/16.4 | 7.8 | 53.0/57.3 | 20.0 | 9.5 | 6.2/2.7 | 21.8 | |[mBART-50](https://huggingface.co/facebook/mbart-large-50) | 611M | 23.6/16.7 | 10.4 | 53.4/58.9 | 18.5 | 11.2 | 5.4/3.7 | 22.4 | |[IndicBART](https://huggingface.co/ai4bharat/IndicBART) | 244M | 22.7/13.1 | 8.1 | 53.3/58.8 | 14.8 | 7.9 | 6.3/2.5 | 20.8 | |[BanglaT5](https://huggingface.co/csebuetnlp/banglat5) | 247M | 38.8/25.2 | 13.7 | 68.5/74.8 | 19.0 | 13.8 | 6.4/4.0 | 29.4 | The benchmarking datasets are as follows: * **MT:** **[Machine Translation](https://github.com/csebuetnlp/banglanmt#datasets)** * **TS:** **[Abstractive Text Summarization](https://huggingface.co/datasets/csebuetnlp/xlsum)** * **QA:** **[Question Answering](https://huggingface.co/datasets/csebuetnlp/squad_bn)** * **MD:** **[Multi Turn Dialogue Generation](https://drive.google.com/file/d/1qPmNN6qA4evbh4cD_BDDTCFOwMu4H2JS/view?usp=sharing)** * **NHG:** **[News Headline Generation](https://huggingface.co/datasets/csebuetnlp/xlsum)** * **XLS:** **[Cross-lingual Summarization](https://huggingface.co/datasets/csebuetnlp/CrossSum)** ## Citation If you use this model, please cite the following paper: ``` @article{bhattacharjee2022banglanlg, author = {Abhik Bhattacharjee and Tahmid Hasan and Wasi Uddin Ahmad and Rifat Shahriyar}, title = {BanglaNLG: Benchmarks and Resources for Evaluating Low-Resource Natural Language Generation
in Bangla}, journal = {CoRR}, volume = {abs/2205.11081}, year = {2022}, url = {https://arxiv.org/abs/2205.11081}, eprinttype = {arXiv}, eprint = {2205.11081} } ``` If you use the normalization module, please cite the following paper: ``` @inproceedings{hasan-etal-2020-low, title = "Not Low-Resource Anymore: Aligner Ensembling, Batch Filtering, and New Datasets for {B}engali-{E}nglish Machine Translation", author = "Hasan, Tahmid and Bhattacharjee, Abhik and Samin, Kazi and Hasan, Masum and Basak, Madhusudan and Rahman, M. Sohel and Shahriyar, Rifat", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.207", doi = "10.18653/v1/2020.emnlp-main.207", pages = "2612--2623", abstract = "Despite being the seventh most widely spoken language in the world, Bengali has received much less attention in machine translation literature due to being low in resources. Most publicly available parallel corpora for Bengali are not large enough; and have rather poor quality, mostly because of incorrect sentence alignments resulting from erroneous sentence segmentation, and also because of a high volume of noise present in them. In this work, we build a customized sentence segmenter for Bengali and propose two novel methods for parallel corpus creation on low-resource setups: aligner ensembling and batch filtering. With the segmenter and the two methods combined, we compile a high-quality Bengali-English parallel corpus comprising of 2.75 million sentence pairs, more than 2 million of which were not available before. Training on neural models, we achieve an improvement of more than 9 BLEU score over previous approaches to Bengali-English machine translation. We also evaluate on a new test set of 1000 pairs made with extensive quality control. We release the segmenter, parallel corpus, and the evaluation set, thus elevating Bengali from its low-resource status. To the best of our knowledge, this is the first ever large scale study on Bengali-English machine translation. We believe our study will pave the way for future research on Bengali-English machine translation as well as other low-resource languages. Our data and code are available at https://github.com/csebuetnlp/banglanmt.", } ```
6,179
[ [ -0.0413818359375, -0.04638671875, -0.0017538070678710938, 0.034515380859375, -0.0169525146484375, 0.005542755126953125, -0.0265350341796875, -0.023040771484375, 0.0185089111328125, 0.021728515625, -0.039276123046875, -0.042510986328125, -0.04833984375, 0.028...
arnomatic/seacreatures
2023-07-24T07:00:00.000Z
[ "diffusers", "tensorboard", "text-to-image", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
arnomatic
null
null
arnomatic/seacreatures
15
687
diffusers
2022-12-09T18:15:57
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: underwater --- ### seacreatures Dreambooth model trained by arnomatic with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) on the v1-5 base model. You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! underwater bioluminescence creature (use that in your prompt) Sample pictures of: ![2354.jpg](https://cdn-uploads.huggingface.co/production/uploads/638635fcc12615765cad241c/tCLLErycfGhbf0Q1TmfZ4.jpeg) ![3212.jpg](https://cdn-uploads.huggingface.co/production/uploads/638635fcc12615765cad241c/vqsK5PJg-3LoHLrXmSIk8.jpeg) ![5435.jpg](https://cdn-uploads.huggingface.co/production/uploads/638635fcc12615765cad241c/O9I3UjwR0AvY7rMCn9IUX.jpeg) underwater bioluminescence creatures (use that in your prompt)
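Since the card points to a Colab notebook rather than inline code, here is a minimal local-inference sketch with `diffusers`; the fp16 dtype and the exact prompt wording are assumptions, not settings from the training space.

```python
# A sketch, not the card's official example; dtype and prompt are assumptions.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "arnomatic/seacreatures", torch_dtype=torch.float16
).to("cuda")

# Use the concept prompt given above.
image = pipe("underwater bioluminescence creature, deep sea").images[0]
image.save("seacreature.png")
```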
1,056
[ [ -0.048980712890625, -0.026519775390625, 0.03985595703125, 0.0292205810546875, -0.057525634765625, 0.026153564453125, 0.02691650390625, -0.036041259765625, 0.055267333984375, 0.0413818359375, -0.049072265625, -0.03570556640625, -0.033050537109375, 0.007144927...
Infernaught/test_adalora_weights
2023-09-07T15:58:45.000Z
[ "peft", "region:us" ]
null
Infernaught
null
null
Infernaught/test_adalora_weights
0
687
peft
2023-09-07T15:26:18
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.5.0
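The card lists only the PEFT version, so for completeness, here is a minimal loading sketch; the base model is not stated on the card, and reading it from the adapter config assumes it was recorded when the adapter was saved.

```python
# Hypothetical loading sketch; the base model id is taken from the adapter
# config, which assumes it was recorded at save time.
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM

adapter_id = "Infernaught/test_adalora_weights"
config = PeftConfig.from_pretrained(adapter_id)
base = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base, adapter_id)
```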
101
[ [ -0.019500732421875, -0.006832122802734375, 0.02142333984375, 0.064697265625, -0.01194000244140625, -0.000026166439056396484, 0.039276123046875, -0.00853729248046875, 0.0196990966796875, 0.0562744140625, -0.04290771484375, -0.0174713134765625, -0.035552978515625,...
google/roberta2roberta_L-24_discofuse
2023-01-24T16:43:18.000Z
[ "transformers", "pytorch", "encoder-decoder", "text2text-generation", "en", "dataset:discofuse", "arxiv:1907.12461", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text2text-generation
google
null
null
google/roberta2roberta_L-24_discofuse
2
686
transformers
2022-03-02T23:29:05
--- language: en license: apache-2.0 datasets: - discofuse --- # Roberta2Roberta_L-24_discofuse EncoderDecoder model The model was introduced in [this paper](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn and first released in [this repository](https://tfhub.dev/google/bertseq2seq/roberta24_discofuse/1). The model is an encoder-decoder model that was initialized from the `roberta-large` checkpoints for both the encoder and decoder and fine-tuned for sentence fusion on the DiscoFuse dataset, which is linked above. Disclaimer: The model card has been written by the Hugging Face team. ## How to use You can use this model for sentence fusion, *e.g.* IMPORTANT: The model was not trained on the `"` (double quotation mark) character, so before tokenizing the text, it is advised to replace all `"` (double quotation marks) with a single `` ` `` (single backtick). ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse") model = AutoModelForSeq2SeqLM.from_pretrained("google/roberta2roberta_L-24_discofuse") discofuse = """As a run-blocker, Zeitler moves relatively well. Zeitler often struggles at the point of contact in space.""" input_ids = tokenizer(discofuse, return_tensors="pt").input_ids output_ids = model.generate(input_ids)[0] print(tokenizer.decode(output_ids, skip_special_tokens=True)) # should output # As a run-blocker, Zeitler moves relatively well. However, Zeitler often struggles at the point of contact in space. ```
1,596
[ [ -0.0156402587890625, -0.0635986328125, 0.0225067138671875, 0.0033397674560546875, -0.0201263427734375, -0.00514984130859375, 0.005458831787109375, -0.019500732421875, 0.0092010498046875, 0.03826904296875, -0.05828857421875, -0.0301055908203125, -0.05838012695312...
pranavpsv/gpt2-genre-story-generator
2021-05-23T11:02:06.000Z
[ "transformers", "pytorch", "jax", "gpt2", "text-generation", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
pranavpsv
null
null
pranavpsv/gpt2-genre-story-generator
39
685
transformers
2022-03-02T23:29:05
# GPT2 Genre Based Story Generator ## Model description GPT2 fine-tuned on genre-based story generation. ## Intended uses Used to generate stories based on user inputted genre and starting prompts. ## How to use #### Supported Genres superhero, action, drama, horror, thriller, sci_fi #### Input text format \<BOS> \<genre> Some optional text... **Example**: \<BOS> \<sci_fi> After discovering time travel, ```python # Example of usage from transformers import pipeline story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator") print(story_gen("<BOS> <superhero> Batman")) ``` ## Training data Initialized with pre-trained weights of "gpt2" checkpoint. Fine-tuned the model on stories of various genres.
739
[ [ -0.01004791259765625, -0.04052734375, 0.040771484375, 0.027252197265625, -0.0228729248046875, 0.01395416259765625, 0.0098114013671875, -0.004150390625, 0.004791259765625, 0.0168609619140625, -0.071533203125, -0.0233917236328125, -0.04248046875, 0.01701354980...
cardiffnlp/twitter-roberta-base-2019-90m
2022-10-10T18:42:18.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "timelms", "twitter", "en", "dataset:twitter-api", "arxiv:2202.03829", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
cardiffnlp
null
null
cardiffnlp/twitter-roberta-base-2019-90m
0
684
transformers
2022-03-02T23:29:05
--- language: en tags: - timelms - twitter license: mit datasets: - twitter-api --- # Twitter 2019 90M (RoBERTa-base) This is a RoBERTa-base model trained on 90M tweets until the end of 2019. More details and performance scores are available in the [TimeLMs paper](https://arxiv.org/abs/2202.03829). Below, we provide some usage examples using the standard Transformers interface. For another interface more suited to comparing predictions and perplexity scores between models trained at different temporal intervals, check the [TimeLMs repository](https://github.com/cardiffnlp/timelms). For other models trained until different periods, check this [table](https://github.com/cardiffnlp/timelms#released-models). ## Preprocess Text Replace usernames and links for placeholders: "@user" and "http". If you're interested in retaining verified users which were also retained during training, you may keep the users listed [here](https://github.com/cardiffnlp/timelms/tree/main/data). ```python def preprocess(text): preprocessed_text = [] for t in text.split(): if len(t) > 1: t = '@user' if t[0] == '@' and t.count('@') == 1 else t t = 'http' if t.startswith('http') else t preprocessed_text.append(t) return ' '.join(preprocessed_text) ``` ## Example Masked Language Model ```python from transformers import pipeline, AutoTokenizer MODEL = "cardiffnlp/twitter-roberta-base-2019-90m" fill_mask = pipeline("fill-mask", model=MODEL, tokenizer=MODEL) tokenizer = AutoTokenizer.from_pretrained(MODEL) def pprint(candidates, n): for i in range(n): token = tokenizer.decode(candidates[i]['token']) score = candidates[i]['score'] print("%d) %.5f %s" % (i+1, score, token)) texts = [ "So glad I'm <mask> vaccinated.", "I keep forgetting to bring a <mask>.", "Looking forward to watching <mask> Game tonight!", ] for text in texts: t = preprocess(text) print(f"{'-'*30}\n{t}") candidates = fill_mask(t) pprint(candidates, 5) ``` Output: ``` ------------------------------ So glad I'm <mask> vaccinated. 1) 0.28870 getting 2) 0.28611 not 3) 0.15485 fully 4) 0.07357 self 5) 0.01812 being ------------------------------ I keep forgetting to bring a <mask>. 1) 0.12194 book 2) 0.04396 pillow 3) 0.04202 bag 4) 0.03038 wallet 5) 0.02729 charger ------------------------------ Looking forward to watching <mask> Game tonight! 
1) 0.65505 End 2) 0.19230 The 3) 0.03856 the 4) 0.01223 end 5) 0.00978 this ``` ## Example Tweet Embeddings ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel import numpy as np from scipy.spatial.distance import cosine from collections import Counter def get_embedding(text): # naive approach for demonstration text = preprocess(text) encoded_input = tokenizer(text, return_tensors='pt') features = model(**encoded_input) features = features[0].detach().cpu().numpy() return np.mean(features[0], axis=0) MODEL = "cardiffnlp/twitter-roberta-base-2019-90m" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModel.from_pretrained(MODEL) query = "The book was awesome" tweets = ["I just ordered fried chicken 🐣", "The movie was great", "What time is the next game?", "Just finished reading 'Embeddings in NLP'"] sims = Counter() for tweet in tweets: sim = 1 - cosine(get_embedding(query), get_embedding(tweet)) sims[tweet] = sim print('Most similar to: ', query) print(f"{'-'*30}") for idx, (tweet, sim) in enumerate(sims.most_common()): print("%d) %.5f %s" % (idx+1, sim, tweet)) ``` Output: ``` Most similar to: The book was awesome ------------------------------ 1) 0.99078 The movie was great 2) 0.96701 Just finished reading 'Embeddings in NLP' 3) 0.96037 I just ordered fried chicken 🐣 4) 0.95919 What time is the next game? ``` ## Example Feature Extraction ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel import numpy as np MODEL = "cardiffnlp/twitter-roberta-base-2019-90m" tokenizer = AutoTokenizer.from_pretrained(MODEL) text = "Good night 😊" text = preprocess(text) # Pytorch model = AutoModel.from_pretrained(MODEL) encoded_input = tokenizer(text, return_tensors='pt') features = model(**encoded_input) features = features[0].detach().cpu().numpy() features_mean = np.mean(features[0], axis=0) #features_max = np.max(features[0], axis=0) # # Tensorflow # model = TFAutoModel.from_pretrained(MODEL) # encoded_input = tokenizer(text, return_tensors='tf') # features = model(encoded_input) # features = features[0].numpy() # features_mean = np.mean(features[0], axis=0) # #features_max = np.max(features[0], axis=0) ```
4,710
[ [ -0.018218994140625, -0.043792724609375, 0.00983428955078125, 0.0260009765625, -0.0137481689453125, 0.007183074951171875, -0.00818634033203125, -0.0054779052734375, 0.0173187255859375, 0.00252532958984375, -0.038177490234375, -0.046783447265625, -0.056396484375, ...
cardiffnlp/twitter-roberta-base-emoji
2022-11-28T11:27:48.000Z
[ "transformers", "pytorch", "tf", "jax", "roberta", "text-classification", "arxiv:2010.12421", "endpoints_compatible", "region:us" ]
text-classification
cardiffnlp
null
null
cardiffnlp/twitter-roberta-base-emoji
9
684
transformers
2022-03-02T23:29:05
# Twitter-roBERTa-base for Emoji prediction This is a roBERTa-base model trained on ~58M tweets and finetuned for emoji prediction with the TweetEval benchmark. - Paper: [_TweetEval_ benchmark (Findings of EMNLP 2020)](https://arxiv.org/pdf/2010.12421.pdf). - Git Repo: [Tweeteval official repository](https://github.com/cardiffnlp/tweeteval). ## Example of classification ```python from transformers import AutoModelForSequenceClassification from transformers import TFAutoModelForSequenceClassification from transformers import AutoTokenizer import numpy as np from scipy.special import softmax import csv import urllib.request # Preprocess text (username and link placeholders) def preprocess(text): new_text = [] for t in text.split(" "): t = '@user' if t.startswith('@') and len(t) > 1 else t t = 'http' if t.startswith('http') else t new_text.append(t) return " ".join(new_text) # Tasks: # emoji, emotion, hate, irony, offensive, sentiment # stance/abortion, stance/atheism, stance/climate, stance/feminist, stance/hillary task='emoji' MODEL = f"cardiffnlp/twitter-roberta-base-{task}" tokenizer = AutoTokenizer.from_pretrained(MODEL) # download label mapping labels=[] mapping_link = f"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/{task}/mapping.txt" with urllib.request.urlopen(mapping_link) as f: html = f.read().decode('utf-8').split("\n") csvreader = csv.reader(html, delimiter='\t') labels = [row[1] for row in csvreader if len(row) > 1] # PT model = AutoModelForSequenceClassification.from_pretrained(MODEL) model.save_pretrained(MODEL) text = "Looking forward to Christmas" text = preprocess(text) encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) scores = output[0][0].detach().numpy() scores = softmax(scores) # # TF # model = TFAutoModelForSequenceClassification.from_pretrained(MODEL) # model.save_pretrained(MODEL) # text = "Looking forward to Christmas" # text = preprocess(text) # encoded_input = tokenizer(text, return_tensors='tf') # output = model(encoded_input) # scores = output[0][0].numpy() # scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = labels[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) {l} {np.round(float(s), 4)}") ``` Output: ``` 1) 🎄 0.5457 2) 😊 0.1417 3) 😁 0.0649 4) 😍 0.0395 5) ❤️ 0.03 6) 😜 0.028 7) ✨ 0.0263 8) 😉 0.0237 9) 😂 0.0177 10) 😎 0.0166 11) 😘 0.0143 12) 💕 0.014 13) 💙 0.0076 14) 💜 0.0068 15) 🔥 0.0065 16) 💯 0.004 17) 🇺🇸 0.0037 18) 📷 0.0034 19) ☀ 0.0033 20) 📸 0.0021 ```
2,625
[ [ -0.0012273788452148438, -0.05218505859375, 0.010498046875, 0.0202484130859375, -0.01233673095703125, 0.011444091796875, -0.0263214111328125, -0.00196075439453125, 0.022125244140625, 0.008392333984375, -0.02777099609375, -0.058624267578125, -0.054168701171875, ...
superb/hubert-base-superb-er
2021-11-04T16:03:24.000Z
[ "transformers", "pytorch", "hubert", "audio-classification", "speech", "audio", "en", "dataset:superb", "arxiv:2105.01051", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
audio-classification
superb
null
null
superb/hubert-base-superb-er
15
684
transformers
2022-03-02T23:29:05
--- language: en datasets: - superb tags: - speech - audio - hubert - audio-classification license: apache-2.0 widget: - example_title: IEMOCAP clip "happy" src: https://cdn-media.huggingface.co/speech_samples/IEMOCAP_Ses01F_impro03_F013.wav - example_title: IEMOCAP clip "neutral" src: https://cdn-media.huggingface.co/speech_samples/IEMOCAP_Ses01F_impro04_F000.wav --- # Hubert-Base for Emotion Recognition ## Model description This is a ported version of [S3PRL's Hubert for the SUPERB Emotion Recognition task](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream/emotion). The base model is [hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960), which is pretrained on 16kHz sampled speech audio. When using the model make sure that your speech input is also sampled at 16Khz. For more information refer to [SUPERB: Speech processing Universal PERformance Benchmark](https://arxiv.org/abs/2105.01051) ## Task and dataset description Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset [IEMOCAP](https://sail.usc.edu/iemocap/) is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion classes to leave the final four classes with a similar amount of data points and cross-validate on five folds of the standard splits. For the original model's training and evaluation instructions refer to the [S3PRL downstream task README](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream#er-emotion-recognition). ## Usage examples You can use the model via the Audio Classification pipeline: ```python from datasets import load_dataset from transformers import pipeline dataset = load_dataset("anton-l/superb_demo", "er", split="session1") classifier = pipeline("audio-classification", model="superb/hubert-base-superb-er") labels = classifier(dataset[0]["file"], top_k=5) ``` Or use the model directly: ```python import torch import librosa from datasets import load_dataset from transformers import HubertForSequenceClassification, Wav2Vec2FeatureExtractor def map_to_array(example): speech, _ = librosa.load(example["file"], sr=16000, mono=True) example["speech"] = speech return example # load a demo dataset and read audio files dataset = load_dataset("anton-l/superb_demo", "er", split="session1") dataset = dataset.map(map_to_array) model = HubertForSequenceClassification.from_pretrained("superb/hubert-base-superb-er") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-er") # compute attention masks and normalize the waveform if needed inputs = feature_extractor(dataset[:4]["speech"], sampling_rate=16000, padding=True, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) labels = [model.config.id2label[_id] for _id in predicted_ids.tolist()] ``` ## Eval results The evaluation metric is accuracy. | | **s3prl** | **transformers** | |--------|-----------|------------------| |**session1**| `0.6492` | `0.6359` | ### BibTeX entry and citation info ```bibtex @article{yang2021superb, title={SUPERB: Speech processing Universal PERformance Benchmark}, author={Yang, Shu-wen and Chi, Po-Han and Chuang, Yung-Sung and Lai, Cheng-I Jeff and Lakhotia, Kushal and Lin, Yist Y and Liu, Andy T and Shi, Jiatong and Chang, Xuankai and Lin, Guan-Ting and others}, journal={arXiv preprint arXiv:2105.01051}, year={2021} } ```
3,491
[ [ -0.0333251953125, -0.0289154052734375, 0.01861572265625, 0.0195770263671875, -0.006061553955078125, -0.0036563873291015625, -0.019500732421875, -0.03436279296875, 0.004486083984375, 0.0194091796875, -0.0439453125, -0.043121337890625, -0.042144775390625, -0.0...
lambdalabs/sd-naruto-diffusers
2023-05-16T09:23:32.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "en", "dataset:lambdalabs/naruto-blip-captions", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
lambdalabs
null
null
lambdalabs/sd-naruto-diffusers
48
684
diffusers
2022-10-27T21:30:28
--- language: - en thumbnail: "https://staticassetbucket.s3.us-west-1.amazonaws.com/GOT_naruto.png" tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image datasets: - lambdalabs/naruto-blip-captions --- # Naruto diffusion *Stable Diffusion fine tuned on Naruto by [Lambda Labs](https://lambdalabs.com/).* __Try the live [text-to-naruto demo here](https://huggingface.co/spaces/lambdalabs/text-to-naruto)!__ If you want more details on how to train your own Stable Diffusion variants, see this [example](https://github.com/LambdaLabsML/examples/tree/main/stable-diffusion-finetuning). ## About Put in a text prompt and generate your own Naruto style image! **Game of Thrones to Naruto** ![pk0.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/GOT_naruto.png) **Marvel to Naruto** ![pk1.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/marvel_naruto.png) ## Prompt engineering matters We find that prompt engineering does help produce compelling and consistent Naruto style portraits. For example, writing prompts such as '*person_name* ninja portrait' or '*person_name* in the style of Naruto' tends to produce results that are closer to the style of Naruto character with the characteristic headband and other elements of costume. Here are a few examples of prompts with and without prompt engineering that will illustrate that point. **Bill Gates:** ![pk2.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/bill_gates_vanilla.png) > Without prompt engineering ![pk3.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/bill_gates_ninja.png) > With prompt engineering **A cute bunny:** ![pk4.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/cute_bunny_vanilla.png) > Without prompt engineering ![pk4.jpg](https://staticassetbucket.s3.us-west-1.amazonaws.com/cute_bunny_ninja.png) > With prompt engineering ## Usage To run model locally: ```bash !pip install diffusers==0.3.0 !pip install transformers scipy ftfy ``` ```python import torch from diffusers import StableDiffusionPipeline from torch import autocast pipe = StableDiffusionPipeline.from_pretrained("lambdalabs/sd-naruto-diffusers", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "Yoda" scale = 10 n_samples = 4 # Sometimes the nsfw checker is confused by the Naruto images, you can disable # it at your own risk here disable_safety = False if disable_safety: def null_safety(images, **kwargs): return images, False pipe.safety_checker = null_safety with autocast("cuda"): images = pipe(n_samples*[prompt], guidance_scale=scale).images for idx, im in enumerate(images): im.save(f"{idx:06}.png") ``` ## Model description Trained on [BLIP captioned Naruto images](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) using 2xA6000 GPUs on [Lambda GPU Cloud](https://lambdalabs.com/service/gpu-cloud) for around 30,000 step (about 12 hours, at a cost of about $20). ## Links - [Lambda Diffusers](https://github.com/LambdaLabsML/lambda-diffusers) - [Captioned Naruto dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) - [Model weights in Diffusers format](https://huggingface.co/lambdalabs/sd-naruto-diffusers) - [Original model weights](https://huggingface.co/justinpinkney/pokemon-stable-diffusion) - [Naruto diffusers repo](https://github.com/eolecvk/naruto-sd) Trained by [Eole Cervenka](https://www.linkedin.com/in/eole-cervenka/) after the work of [Justin Pinkney](https://justinpinkney.com) ([@Buntworthy](https://twitter.com/Buntworthy)) at [Lambda Labs](https://lambdalabs.com/).
3,598
[ [ -0.045074462890625, -0.0443115234375, 0.013671875, 0.031585693359375, -0.0076141357421875, 0.01495361328125, -0.00556182861328125, -0.0243072509765625, 0.025848388671875, 0.022857666015625, -0.053680419921875, -0.002197265625, -0.0189208984375, 0.01032257080...
timm/hrnet_w48.ms_in1k
2023-04-24T21:32:23.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1908.07919", "license:mit", "region:us" ]
image-classification
timm
null
null
timm/hrnet_w48.ms_in1k
0
684
timm
2023-04-24T21:31:09
--- tags: - image-classification - timm library_name: timm license: mit datasets: - imagenet-1k --- # Model card for hrnet_w48.ms_in1k An HRNet image classification model. Trained on ImageNet-1k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 77.5 - GMACs: 17.3 - Activations (M): 28.6 - Image size: 224 x 224 - **Papers:** - Deep High-Resolution Representation Learning for Visual Recognition: https://arxiv.org/abs/1908.07919 - **Original:** https://github.com/HRNet/HRNet-Image-Classification - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('hrnet_w48.ms_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'hrnet_w48.ms_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'hrnet_w48.ms_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @article{WangSCJDZLMTWLX19, title={Deep High-Resolution Representation Learning for Visual Recognition}, author={Jingdong Wang and Ke Sun and Tianheng Cheng and Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, journal = {TPAMI}, year={2019} } ```
3,756
[ [ -0.0364990234375, -0.0299835205078125, -0.00077056884765625, 0.01140594482421875, -0.0265960693359375, -0.0283355712890625, -0.0197296142578125, -0.0279388427734375, 0.016448974609375, 0.03314208984375, -0.032470703125, -0.05816650390625, -0.048797607421875, ...
Infernaught/test_adapter_weights
2023-08-30T23:35:23.000Z
[ "peft", "region:us" ]
null
Infernaught
null
null
Infernaught/test_adapter_weights
0
684
peft
2023-08-30T23:34:56
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.4.0
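The listed values map onto a `transformers` `BitsAndBytesConfig`; a sketch follows, with the base model id as a placeholder since the card does not name the base checkpoint.

```python
# Sketch reproducing the quantization settings listed above; "base/model"
# is a placeholder, as the card does not name the base checkpoint.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
base = AutoModelForCausalLM.from_pretrained("base/model", quantization_config=bnb_config)
```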
793
[ [ -0.048736572265625, -0.049041748046875, 0.0238800048828125, 0.03515625, -0.04180908203125, 0.00396728515625, 0.00658416748046875, -0.0211334228515625, -0.00955963134765625, 0.026214599609375, -0.047821044921875, -0.014129638671875, -0.03729248046875, 0.01445...
Helsinki-NLP/opus-mt-yo-en
2023-08-16T12:09:00.000Z
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "yo", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-mt-yo-en
0
683
transformers
2022-03-02T23:29:04
--- tags: - translation license: apache-2.0 --- ### opus-mt-yo-en * source languages: yo * target languages: en * OPUS readme: [yo-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/yo-en/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/yo-en/opus-2020-01-16.zip) * test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/yo-en/opus-2020-01-16.test.txt) * test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/yo-en/opus-2020-01-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.yo.en | 33.8 | 0.496 |
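The card gives no usage snippet; a minimal translation sketch with the standard Marian interface follows (the Yoruba example sentence is illustrative, not from the test set).

```python
# Minimal usage sketch; the example sentence is illustrative only.
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-yo-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["Bawo ni o se wa?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```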
816
[ [ -0.0183258056640625, -0.038970947265625, 0.0195465087890625, 0.01849365234375, -0.0279388427734375, -0.0313720703125, -0.030426025390625, -0.00934600830078125, 0.01107025146484375, 0.035888671875, -0.059356689453125, -0.03839111328125, -0.04046630859375, 0.0...
monologg/koelectra-small-discriminator
2020-12-26T16:23:23.000Z
[ "transformers", "pytorch", "electra", "pretraining", "ko", "endpoints_compatible", "region:us" ]
null
monologg
null
null
monologg/koelectra-small-discriminator
0
683
transformers
2022-03-02T23:29:05
--- language: ko --- # KoELECTRA (Small Discriminator) Pretrained ELECTRA Language Model for Korean (`koelectra-small-discriminator`) For more detail, please see [original repository](https://github.com/monologg/KoELECTRA/blob/master/README_EN.md). ## Usage ### Load model and tokenizer ```python >>> from transformers import ElectraModel, ElectraTokenizer >>> model = ElectraModel.from_pretrained("monologg/koelectra-small-discriminator") >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-discriminator") ``` ### Tokenizer example ```python >>> from transformers import ElectraTokenizer >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-discriminator") >>> tokenizer.tokenize("[CLS] 한국어 ELECTRA를 공유합니다. [SEP]") ['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]'] >>> tokenizer.convert_tokens_to_ids(['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]']) [2, 18429, 41, 6240, 15229, 6204, 20894, 5689, 12622, 10690, 18, 3] ``` ## Example using ElectraForPreTraining ```python import torch from transformers import ElectraForPreTraining, ElectraTokenizer discriminator = ElectraForPreTraining.from_pretrained("monologg/koelectra-small-discriminator") tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-discriminator") sentence = "나는 방금 밥을 먹었다." fake_sentence = "나는 내일 밥을 먹었다." fake_tokens = tokenizer.tokenize(fake_sentence) fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt") discriminator_outputs = discriminator(fake_inputs) predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2) print(list(zip(fake_tokens, predictions.tolist()[1:-1]))) ```
1,734
[ [ -0.0186767578125, -0.0248870849609375, 0.0048980712890625, 0.0211181640625, -0.046844482421875, 0.01641845703125, -0.0084075927734375, 0.009063720703125, 0.024688720703125, 0.034088134765625, -0.031646728515625, -0.0404052734375, -0.039642333984375, 0.029129...
baffo32/decapoda-research-llama-7B-hf
2023-04-10T18:22:05.000Z
[ "transformers", "pytorch", "llama", "text-generation", "license:other", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
baffo32
null
null
baffo32/decapoda-research-llama-7B-hf
2
683
transformers
2023-04-10T12:49:58
--- license: other --- LLaMA-7B converted to work with Transformers/HuggingFace. This is under a special license; please see the LICENSE file for details. # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December 2022 and February 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA: Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citations details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project, by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension; understanding the capabilities and limitations of current language models, and developing techniques to improve those; and evaluating and mitigating biases, risks, toxic and harmful content generation, and hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used in downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measures to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. 
**Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. ## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following sources of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange [2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th>LLaMA</th> <th colspan=6>Model hyper parameters</th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>4096</th><th>32</th><th>32</th><th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> </tbody> </table> *Table 1 - Summary of LLaMA Model Hyperparameters* We present our results on eight standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks</th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93</th> </tr> <tr> <th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94</th> </tr> <tr> <th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92</th> </tr> <tr> <th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th> </tr> </tbody> </table> *Table 2 - Summary of LLaMA Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that a lower value is better, indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary bias of our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. 
**Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
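The card itself ships no code; a minimal generation sketch under the usual `transformers` causal-LM interface is given below. The fp16 dtype, `device_map="auto"`, and the prompt are assumptions, and older conversions of this checkpoint sometimes require a slow or Llama-specific tokenizer.

```python
# A sketch under the standard causal-LM API, not an official example for
# this conversion; dtype, device_map, and prompt are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "baffo32/decapoda-research-llama-7B-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```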
8,308
[ [ -0.0296630859375, -0.054443359375, 0.033111572265625, 0.021148681640625, -0.017608642578125, -0.018524169921875, 0.001064300537109375, -0.04913330078125, 0.00511932373046875, 0.03240966796875, -0.034576416015625, -0.042938232421875, -0.054107666015625, 0.015...
abhinavkulkarni/tiiuae-falcon-40b-instruct-w4-g128-awq
2023-09-12T13:09:19.000Z
[ "transformers", "pytorch", "RefinedWeb", "text-generation", "AWQ", "custom_code", "license:apache-2.0", "text-generation-inference", "region:us" ]
text-generation
abhinavkulkarni
null
null
abhinavkulkarni/tiiuae-falcon-40b-instruct-w4-g128-awq
1
683
transformers
2023-07-13T11:41:06
--- license: apache-2.0 tags: - AWQ inference: false --- # Falcon-40b-Instruct (4-bit 128g AWQ Quantized) [Falcon-40b-Instruct](https://huggingface.co/tiiuae/falcon-40b-instruct) is a 40B-parameter causal decoder-only model built by [TII](https://www.tii.ae) based on [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) and finetuned on a mixture of chat/instruct datasets. This model is a 4-bit, 128-group-size AWQ quantized model. For more information about AWQ quantization, please click [here](https://github.com/mit-han-lab/llm-awq). ## Model Date July 5, 2023 ## Model License Please refer to the original Falcon model license ([link](https://huggingface.co/tiiuae/falcon-40b-instruct)). Please refer to the AWQ quantization license ([link](https://github.com/mit-han-lab/llm-awq/blob/main/LICENSE)). ## CUDA Version This model was successfully tested on CUDA driver v530.30.02 and runtime v11.7 with Python v3.10.11. Please note that AWQ requires NVIDIA GPUs with compute capability of `8.0` or higher. ## How to Use ```bash git clone https://github.com/mit-han-lab/llm-awq \ && cd llm-awq \ && git checkout f084f40bd996f3cf3a0633c1ad7d9d476c318aaa \ && pip install -e . \ && cd awq/kernels \ && python setup.py install ``` ```python import torch from awq.quantize.quantizer import real_quantize_model_weight from awq.utils.utils import simple_dispatch_model from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer, TextStreamer from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_in_model from huggingface_hub import snapshot_download model_name = "abhinavkulkarni/tiiuae-falcon-40b-instruct-w4-g128-awq" # Config config = AutoConfig.from_pretrained(model_name, trust_remote_code=True) # Tokenizer try: tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name, trust_remote_code=True) except: tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, trust_remote_code=True) streamer = TextStreamer(tokenizer, skip_special_tokens=True) # Model w_bit = 4 q_config = { "zero_point": True, "q_group_size": 128, } # Initialize empty model with init_empty_weights(): model = AutoModelForCausalLM.from_config(config=config, torch_dtype=torch.float16, trust_remote_code=True) real_quantize_model_weight(model, w_bit=w_bit, q_config=q_config, init_only=True) model.tie_weights() # Infer device_map device_map = infer_auto_device_map( model, no_split_module_classes=[ "OPTDecoderLayer", "LlamaDecoderLayer", "BloomBlock", "MPTBlock", "DecoderLayer"] ) # Load weights load_checkpoint_in_model( model, checkpoint=snapshot_download(model_name), device_map=device_map, offload_state_dict=True, ) model = simple_dispatch_model(model, device_map=device_map) # Inference prompt = f'''What is the difference between nuclear fusion and fission? ###Response:''' input_ids = tokenizer(prompt, return_tensors='pt').input_ids.cuda() output = model.generate( inputs=input_ids, temperature=0.7, max_new_tokens=512, top_p=0.15, top_k=0, repetition_penalty=1.1, eos_token_id=tokenizer.eos_token_id, streamer=streamer, ) ``` ## Evaluation This evaluation was done using [LM-Eval](https://github.com/EleutherAI/lm-evaluation-harness). 
[Falcon-40b-Instruct](https://huggingface.co/tiiuae/falcon-40b-instruct)

| Task   |Version| Metric        |Value |   |Stderr|
|--------|------:|---------------|-----:|---|------|
|wikitext|      1|word_perplexity|8.8219|   |      |
|        |       |byte_perplexity|1.5025|   |      |
|        |       |bits_per_byte  |0.5874|   |      |

[Falcon-40b-Instruct (4-bit 128-group AWQ)](https://huggingface.co/abhinavkulkarni/tiiuae-falcon-40b-instruct-w4-g128-awq)

| Task   |Version| Metric        |Value |   |Stderr|
|--------|------:|---------------|-----:|---|------|
|wikitext|      1|word_perplexity|8.9237|   |      |
|        |       |byte_perplexity|1.5058|   |      |
|        |       |bits_per_byte  |0.5905|   |      |

## Acknowledgements

*Paper coming soon* 😊. In the meantime, you can use the following information to cite:

```
@article{falcon40b,
  title={{Falcon-40B}: an open large language model with state-of-the-art performance},
  author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme},
  year={2023}
}
```

The model was quantized using the AWQ technique. If you find AWQ useful or relevant to your research, please kindly cite the paper:

```
@article{lin2023awq,
  title={AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration},
  author={Lin, Ji and Tang, Jiaming and Tang, Haotian and Yang, Shang and Dang, Xingyu and Han, Song},
  journal={arXiv},
  year={2023}
}
```
5,089
[ [ -0.0345458984375, -0.049285888671875, 0.0259246826171875, 0.018768310546875, -0.01352691650390625, 0.0036602020263671875, 0.00521087646484375, -0.0275726318359375, 0.0014219284057617188, 0.0005183219909667969, -0.041259765625, -0.0248260498046875, -0.04046630859...
cardiffnlp/twitter-roberta-base-dec2020
2022-10-10T18:27:25.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "timelms", "twitter", "en", "dataset:twitter-api", "arxiv:2202.03829", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
cardiffnlp
null
null
cardiffnlp/twitter-roberta-base-dec2020
0
682
transformers
2022-03-02T23:29:05
---
language: en
tags:
- timelms
- twitter
license: mit
datasets:
- twitter-api
---

# Twitter December 2020 (RoBERTa-base, 107M)

This is a RoBERTa-base model trained on 107.06M tweets until the end of December 2020. More details and performance scores are available in the [TimeLMs paper](https://arxiv.org/abs/2202.03829).

Below, we provide some usage examples using the standard Transformers interface. For another interface more suited to comparing predictions and perplexity scores between models trained at different temporal intervals, check the [TimeLMs repository](https://github.com/cardiffnlp/timelms).

For other models trained until different periods, check this [table](https://github.com/cardiffnlp/timelms#released-models).

## Preprocess Text

Replace usernames and links with the placeholders "@user" and "http". If you're interested in retaining verified users which were also retained during training, you may keep the users listed [here](https://github.com/cardiffnlp/timelms/tree/main/data).

```python
def preprocess(text):
    preprocessed_text = []
    for t in text.split():
        if len(t) > 1:
            t = '@user' if t[0] == '@' and t.count('@') == 1 else t
            t = 'http' if t.startswith('http') else t
        preprocessed_text.append(t)
    return ' '.join(preprocessed_text)
```

## Example Masked Language Model

```python
from transformers import pipeline, AutoTokenizer

MODEL = "cardiffnlp/twitter-roberta-base-dec2020"
fill_mask = pipeline("fill-mask", model=MODEL, tokenizer=MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

def pprint(candidates, n):
    for i in range(n):
        token = tokenizer.decode(candidates[i]['token'])
        score = candidates[i]['score']
        print("%d) %.5f %s" % (i+1, score, token))

texts = [
    "So glad I'm <mask> vaccinated.",
    "I keep forgetting to bring a <mask>.",
    "Looking forward to watching <mask> Game tonight!",
]
for text in texts:
    t = preprocess(text)
    print(f"{'-'*30}\n{t}")
    candidates = fill_mask(t)
    pprint(candidates, 5)
```

Output:

```
------------------------------
So glad I'm <mask> vaccinated.
1) 0.42239 not
2) 0.23834 getting
3) 0.10684 fully
4) 0.07550 being
5) 0.02097 already
------------------------------
I keep forgetting to bring a <mask>.
1) 0.08145 mask
2) 0.05051 laptop
3) 0.04620 book
4) 0.03910 bag
5) 0.03824 blanket
------------------------------
Looking forward to watching <mask> Game tonight!
1) 0.57602 the 2) 0.25120 The 3) 0.02610 End 4) 0.02324 this 5) 0.00690 This ``` ## Example Tweet Embeddings ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel import numpy as np from scipy.spatial.distance import cosine from collections import Counter def get_embedding(text): # naive approach for demonstration text = preprocess(text) encoded_input = tokenizer(text, return_tensors='pt') features = model(**encoded_input) features = features[0].detach().cpu().numpy() return np.mean(features[0], axis=0) MODEL = "cardiffnlp/twitter-roberta-base-dec2020" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModel.from_pretrained(MODEL) query = "The book was awesome" tweets = ["I just ordered fried chicken 🐣", "The movie was great", "What time is the next game?", "Just finished reading 'Embeddings in NLP'"] sims = Counter() for tweet in tweets: sim = 1 - cosine(get_embedding(query), get_embedding(tweet)) sims[tweet] = sim print('Most similar to: ', query) print(f"{'-'*30}") for idx, (tweet, sim) in enumerate(sims.most_common()): print("%d) %.5f %s" % (idx+1, sim, tweet)) ``` Output: ``` Most similar to: The book was awesome ------------------------------ 1) 0.99084 The movie was great 2) 0.96618 Just finished reading 'Embeddings in NLP' 3) 0.96127 I just ordered fried chicken 🐣 4) 0.95315 What time is the next game? ``` ## Example Feature Extraction ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel import numpy as np MODEL = "cardiffnlp/twitter-roberta-base-dec2020" tokenizer = AutoTokenizer.from_pretrained(MODEL) text = "Good night 😊" text = preprocess(text) # Pytorch model = AutoModel.from_pretrained(MODEL) encoded_input = tokenizer(text, return_tensors='pt') features = model(**encoded_input) features = features[0].detach().cpu().numpy() features_mean = np.mean(features[0], axis=0) #features_max = np.max(features[0], axis=0) # # Tensorflow # model = TFAutoModel.from_pretrained(MODEL) # encoded_input = tokenizer(text, return_tensors='tf') # features = model(encoded_input) # features = features[0].numpy() # features_mean = np.mean(features[0], axis=0) # #features_max = np.max(features[0], axis=0) ```
4,732
[ [ -0.01861572265625, -0.041839599609375, 0.0101318359375, 0.024261474609375, -0.0173187255859375, 0.0073089599609375, -0.008453369140625, -0.006999969482421875, 0.017181396484375, -0.0010738372802734375, -0.0382080078125, -0.046295166015625, -0.05718994140625, ...
dandelin/vilt-b32-mlm-itm
2021-11-27T10:13:10.000Z
[ "transformers", "pytorch", "vilt", "arxiv:2102.03334", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
dandelin
null
null
dandelin/vilt-b32-mlm-itm
2
681
transformers
2022-03-02T23:29:05
---
license: apache-2.0
---

# Vision-and-Language Transformer (ViLT), pre-trained only

Vision-and-Language Transformer (ViLT) model pre-trained on GCC+SBU+COCO+VG (200k steps). It was introduced in the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Kim et al. and first released in [this repository](https://github.com/dandelin/ViLT).

Disclaimer: The team releasing ViLT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

(to do)

## Intended uses & limitations

You can use the raw model for visual question answering.

### How to use

(to do; a minimal sketch appears after the citation below)

## Training data

(to do)

## Training procedure

### Preprocessing

(to do)

### Pretraining

(to do)

## Evaluation results

(to do)

### BibTeX entry and citation info

```bibtex
@misc{kim2021vilt,
      title={ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision},
      author={Wonjae Kim and Bokyung Son and Ildoo Kim},
      year={2021},
      eprint={2102.03334},
      archivePrefix={arXiv},
      primaryClass={stat.ML}
}
```
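Until the usage sections above are filled in, here is a minimal masked-language-modeling sketch. It rests on an assumption: that this mlm-itm checkpoint loads via `ViltForMaskedLM` (as the sibling `dandelin/vilt-b32-mlm` checkpoint does, with the unused ITM head likely producing a load-time warning); the COCO image URL and masked sentence are only examples.

```python
# Minimal MLM sketch -- assumes ViltForMaskedLM accepts this mlm-itm checkpoint.
import requests
import torch
from PIL import Image
from transformers import ViltProcessor, ViltForMaskedLM

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)
text = "a bunch of [MASK] laying on a [MASK]."

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm-itm")
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm-itm")

encoding = processor(image, text, return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits

# Decode the highest-scoring token at each [MASK] position
mask_positions = (encoding.input_ids[0] == processor.tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(processor.tokenizer.decode(predicted_ids))
```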
1,174
[ [ -0.0498046875, -0.056610107421875, 0.016937255859375, 0.00661468505859375, -0.036285400390625, -0.00402069091796875, -0.012115478515625, -0.030853271484375, -0.001983642578125, 0.050262451171875, -0.0516357421875, -0.0289459228515625, -0.048248291015625, -0....
timm/vit_large_patch16_224.mae
2023-05-09T20:33:10.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2111.06377", "arxiv:2010.11929", "license:cc-by-nc-4.0", "region:us" ]
image-classification
timm
null
null
timm/vit_large_patch16_224.mae
0
681
timm
2023-05-09T20:29:15
---
tags:
- image-classification
- timm
library_name: timm
license: cc-by-nc-4.0
---

# Model card for vit_large_patch16_224.mae

A Vision Transformer (ViT) image feature model. Pretrained on ImageNet-1k with the Self-Supervised Masked Autoencoder (MAE) method.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 303.3
  - GMACs: 61.6
  - Activations (M): 63.5
  - Image size: 224 x 224
- **Papers:**
  - Masked Autoencoders Are Scalable Vision Learners: https://arxiv.org/abs/2111.06377
  - An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale: https://arxiv.org/abs/2010.11929v2
- **Pretrain Dataset:** ImageNet-1k
- **Original:** https://github.com/facebookresearch/mae

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('vit_large_patch16_224.mae', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'vit_large_patch16_224.mae',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 197, 1024) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation
```bibtex
@Article{MaskedAutoencoders2021,
  author  = {Kaiming He and Xinlei Chen and Saining Xie and Yanghao Li and Piotr Doll{\'a}r and Ross Girshick},
  journal = {arXiv:2111.06377},
  title   = {Masked Autoencoders Are Scalable Vision Learners},
  year    = {2021},
}
```
```bibtex
@article{dosovitskiy2020vit,
  title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
  author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
  journal={ICLR},
  year={2021}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
3,597
[ [ -0.038970947265625, -0.0281829833984375, 0.004566192626953125, 0.0161590576171875, -0.021392822265625, -0.020660400390625, -0.01535797119140625, -0.033721923828125, 0.0277557373046875, 0.0298614501953125, -0.03741455078125, -0.04144287109375, -0.05902099609375, ...
Infernaught/test_ap_weights
2023-09-06T23:30:16.000Z
[ "peft", "region:us" ]
null
Infernaught
null
null
Infernaught/test_ap_weights
0
681
peft
2023-09-06T23:29:54
---
library_name: peft
---

## Training procedure

### Framework versions

- PEFT 0.5.0
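Since the card documents no usage, here is a minimal loading sketch. It assumes the adapter targets a causal language model; the base model identifier below is a placeholder (the card does not state which base model these adapter weights were trained on).

```python
# Minimal sketch: attach these PEFT adapter weights to a base causal LM.
# NOTE: "base-model-name" is a placeholder -- substitute the actual base model.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("base-model-name")
tokenizer = AutoTokenizer.from_pretrained("base-model-name")

model = PeftModel.from_pretrained(base_model, "Infernaught/test_ap_weights")
model.eval()
```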
101
[ [ -0.019500732421875, -0.006832122802734375, 0.02142333984375, 0.064697265625, -0.01194000244140625, -0.000026166439056396484, 0.039276123046875, -0.00853729248046875, 0.0196990966796875, 0.0562744140625, -0.04290771484375, -0.0174713134765625, -0.035552978515625,...
NickKolok/milly-thompson-20230428-16-adm-5000-steps
2023-04-28T19:51:35.000Z
[ "diffusers", "text-to-image", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
NickKolok
null
null
NickKolok/milly-thompson-20230428-16-adm-5000-steps
0
680
diffusers
2023-04-28T15:29:56
---
license: creativeml-openrail-m
tags:
- text-to-image
---

### milly-thompson-20230428-16-adm-5000-steps on Stable Diffusion via Dreambooth

#### model by NickKolok

This is the Stable Diffusion model fine-tuned on the milly-thompson-20230428-16-adm-5000-steps concept, taught to Stable Diffusion with Dreambooth.

It can be used by modifying the `instance_prompt`: **millythompsontrigun**

You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
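For quick local inference without the notebooks, a minimal `diffusers` sketch follows. It assumes a standard Stable Diffusion checkpoint layout; everything in the prompt beyond the instance token is illustrative.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "NickKolok/milly-thompson-20230428-16-adm-5000-steps",
    torch_dtype=torch.float16,
).to("cuda")

# "millythompsontrigun" is the instance prompt noted above
image = pipe("millythompsontrigun, portrait, detailed").images[0]
image.save("milly_thompson.png")
```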
937
[ [ -0.0263824462890625, -0.06854248046875, 0.042877197265625, 0.0019779205322265625, -0.00824737548828125, 0.01100921630859375, 0.0271453857421875, -0.01036834716796875, 0.0357666015625, 0.0242156982421875, -0.03009033203125, -0.031951904296875, -0.0445556640625, ...
ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt
2021-04-01T14:09:29.000Z
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "zh", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
ydshieh
null
null
ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt
29
679
transformers
2022-03-02T23:29:05
---
language: zh
datasets:
- common_voice
metrics:
- cer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 - Chinese (zh-CN), by Yih-Dar SHIEH
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice zh-CN
      type: common_voice
      args: zh-CN
    metrics:
    - name: Test CER
      type: cer
      value: 20.90
---

# Wav2Vec2-Large-XLSR-53-Chinese-zh-cn-gpt

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Chinese (zh-CN) using [Common Voice](https://huggingface.co/datasets/common_voice), including the [Common Voice](https://huggingface.co/datasets/common_voice) Chinese (zh-TW) dataset (converting the label text to simplified Chinese). When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "zh-CN", split="test")

processor = Wav2Vec2Processor.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt")
model = Wav2Vec2ForCTC.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset[:2]["sentence"])
```

## Evaluation

The model can be evaluated as follows on the zh-CN test data of Common Voice.
The original CER calculation refers to https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese

```python
#!pip install datasets==1.4.1
#!pip install transformers==4.4.0
#!pip install torchaudio
#!pip install jiwer

import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
import jiwer

def chunked_cer(targets, predictions, chunk_size=None):

    _predictions = [char for seq in predictions for char in list(seq)]
    _targets = [char for seq in targets for char in list(seq)]

    if chunk_size is None:
        return jiwer.wer(_targets, _predictions)

    start = 0
    end = chunk_size
    H, S, D, I = 0, 0, 0, 0

    while start < len(targets):
        _predictions = [char for seq in predictions[start:end] for char in list(seq)]
        _targets = [char for seq in targets[start:end] for char in list(seq)]
        chunk_metrics = jiwer.compute_measures(_targets, _predictions)
        H = H + chunk_metrics["hits"]
        S = S + chunk_metrics["substitutions"]
        D = D + chunk_metrics["deletions"]
        I = I + chunk_metrics["insertions"]
        start += chunk_size
        end += chunk_size

    return float(S + D + I) / float(H + S + D)

test_dataset = load_dataset("common_voice", "zh-CN", split="test")

processor = Wav2Vec2Processor.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt")
model = Wav2Vec2ForCTC.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:"\“\%\‘\”\�\.\⋯\!\-\:\–\。\》\,\)\,\?\;\~\~\…\︰\,\(\」\‧\《\﹔\、\—\/\,\「\﹖\·\×\̃\̌\ε\λ\μ\и\т\─\□\〈\〉\『\』\ア\オ\カ\チ\ド\ベ\ャ\ヤ\ン\・\丶\a\b\f\g\i\n\p\t' + "\']"

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") + " "
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("CER: {:2f}".format(100 * chunked_cer(predictions=result["pred_strings"], targets=result["sentence"], chunk_size=1000)))
```

**Test Result**: 20.902244 %

## Training

The Common Voice zh-CN `train` and `validation` splits were used for training, as well as the Common Voice zh-TW `train`, `validation` and `test` datasets.

The script used for training can be found [to be uploaded later](...)
15,050
[ [ -0.00824737548828125, -0.050140380859375, 0.0135498046875, 0.0006852149963378906, -0.0021305084228515625, 0.0078125, 0.03338623046875, -0.017547607421875, 0.032989501953125, 0.045684814453125, -0.0631103515625, 0.00029754638671875, -0.065185546875, 0.0038757...
dallinmackay/JWST-Deep-Space-diffusion
2023-05-16T09:26:16.000Z
[ "diffusers", "stable-diffusion", "text-to-image", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
dallinmackay
null
null
dallinmackay/JWST-Deep-Space-diffusion
147
679
diffusers
2022-11-09T01:15:48
---
license: creativeml-openrail-m
thumbnail: "https://huggingface.co/dallinmackay/JWST-Deep-Space-diffusion/resolve/main/previewJWST.jpg"
tags:
- stable-diffusion
- text-to-image
---

### JWST Deep Space Diffusion

This is a fine-tuned Stable Diffusion model (based on v1.5) trained on images taken by the **_James Webb Space Telescope_**, as well as images processed by Judy Schmidt.

Use the token **_JWST_** in your prompts to use the style (e.g., "jwst, green spiral galaxy").

[CKPT download link](https://huggingface.co/dallinmackay/JWST-Deep-Space-diffusion/resolve/main/JWST-Deep-Space.ckpt)

**Images rendered with this model:**

_prompt and settings used: **"JWST"** | **Steps: 25, Sampler: Euler_a, CFG scale: 7**_

![Image Samples](https://huggingface.co/dallinmackay/JWST-Deep-Space-diffusion/resolve/main/previewJWST.jpg)

--

[![Become A Patreon](https://badgen.net/badge/become/a%20patron/F96854)](https://www.patreon.com/dallinmackay)

--

This model was trained with Dreambooth, using the TheLastBen colab notebook.

--

### 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).

You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().

## License

This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)

[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
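As a concrete starting point, here is a minimal `diffusers` sketch; the steps and CFG scale loosely follow the settings quoted above, and the prompt beyond the "jwst" style token is illustrative.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "dallinmackay/JWST-Deep-Space-diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# "jwst" is the style token described above
image = pipe("jwst, green spiral galaxy", num_inference_steps=25, guidance_scale=7).images[0]
image.save("jwst_sample.png")
```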
2,278
[ [ -0.03125, -0.056793212890625, 0.044769287109375, 0.0276641845703125, -0.01788330078125, -0.01386260986328125, -0.0014352798461914062, -0.0186920166015625, 0.01294708251953125, 0.03564453125, -0.046478271484375, -0.0352783203125, -0.0535888671875, -0.02651977...
timm/regnetx_008.tv2_in1k
2023-03-21T06:31:41.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2003.13678", "license:bsd-3-clause", "region:us" ]
image-classification
timm
null
null
timm/regnetx_008.tv2_in1k
0
679
timm
2023-03-21T06:31:35
---
tags:
- image-classification
- timm
library_name: timm
license: bsd-3-clause
---

# Model card for regnetx_008.tv2_in1k

A RegNetX-800MF image classification model. Pretrained on ImageNet-1k by torchvision contributors (see ImageNet1K-V2 weight details https://github.com/pytorch/vision/issues/3995#new-recipe).

The `timm` RegNet implementation includes a number of enhancements not present in other implementations, including:
* stochastic depth
* gradient checkpointing
* layer-wise LR decay
* configurable output stride (dilation)
* configurable activation and norm layers
* option for a pre-activation bottleneck block used in RegNetV variant
* only known RegNetZ model definitions with pretrained weights

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 7.3
  - GMACs: 0.8
  - Activations (M): 5.1
  - Image size: 224 x 224
- **Papers:**
  - Designing Network Design Spaces: https://arxiv.org/abs/2003.13678
- **Original:** https://github.com/pytorch/vision

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('regnetx_008.tv2_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'regnetx_008.tv2_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 32, 112, 112])
    #  torch.Size([1, 64, 56, 56])
    #  torch.Size([1, 128, 28, 28])
    #  torch.Size([1, 288, 14, 14])
    #  torch.Size([1, 672, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'regnetx_008.tv2_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 672, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and
runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). For the comparison summary below, the ra_in1k, ra3_in1k, ch_in1k, sw_*, and lion_* tagged weights are trained in `timm`. |model |img_size|top1 |top5 |param_count|gmacs|macts | |-------------------------|--------|------|------|-----------|-----|------| |[regnety_1280.swag_ft_in1k](https://huggingface.co/timm/regnety_1280.swag_ft_in1k)|384 |88.228|98.684|644.81 |374.99|210.2 | |[regnety_320.swag_ft_in1k](https://huggingface.co/timm/regnety_320.swag_ft_in1k)|384 |86.84 |98.364|145.05 |95.0 |88.87 | |[regnety_160.swag_ft_in1k](https://huggingface.co/timm/regnety_160.swag_ft_in1k)|384 |86.024|98.05 |83.59 |46.87|67.67 | |[regnety_160.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.sw_in12k_ft_in1k)|288 |86.004|97.83 |83.59 |26.37|38.07 | |[regnety_1280.swag_lc_in1k](https://huggingface.co/timm/regnety_1280.swag_lc_in1k)|224 |85.996|97.848|644.81 |127.66|71.58 | |[regnety_160.lion_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.lion_in12k_ft_in1k)|288 |85.982|97.844|83.59 |26.37|38.07 | |[regnety_160.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.sw_in12k_ft_in1k)|224 |85.574|97.666|83.59 |15.96|23.04 | |[regnety_160.lion_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.lion_in12k_ft_in1k)|224 |85.564|97.674|83.59 |15.96|23.04 | |[regnety_120.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_120.sw_in12k_ft_in1k)|288 |85.398|97.584|51.82 |20.06|35.34 | |[regnety_2560.seer_ft_in1k](https://huggingface.co/timm/regnety_2560.seer_ft_in1k)|384 |85.15 |97.436|1282.6 |747.83|296.49| |[regnetz_e8.ra3_in1k](https://huggingface.co/timm/regnetz_e8.ra3_in1k)|320 |85.036|97.268|57.7 |15.46|63.94 | |[regnety_120.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_120.sw_in12k_ft_in1k)|224 |84.976|97.416|51.82 |12.14|21.38 | |[regnety_320.swag_lc_in1k](https://huggingface.co/timm/regnety_320.swag_lc_in1k)|224 |84.56 |97.446|145.05 |32.34|30.26 | |[regnetz_040_h.ra3_in1k](https://huggingface.co/timm/regnetz_040_h.ra3_in1k)|320 |84.496|97.004|28.94 |6.43 |37.94 | |[regnetz_e8.ra3_in1k](https://huggingface.co/timm/regnetz_e8.ra3_in1k)|256 |84.436|97.02 |57.7 |9.91 |40.94 | |[regnety_1280.seer_ft_in1k](https://huggingface.co/timm/regnety_1280.seer_ft_in1k)|384 |84.432|97.092|644.81 |374.99|210.2 | |[regnetz_040.ra3_in1k](https://huggingface.co/timm/regnetz_040.ra3_in1k)|320 |84.246|96.93 |27.12 |6.35 |37.78 | |[regnetz_d8.ra3_in1k](https://huggingface.co/timm/regnetz_d8.ra3_in1k)|320 |84.054|96.992|23.37 |6.19 |37.08 | |[regnetz_d8_evos.ch_in1k](https://huggingface.co/timm/regnetz_d8_evos.ch_in1k)|320 |84.038|96.992|23.46 |7.03 |38.92 | |[regnetz_d32.ra3_in1k](https://huggingface.co/timm/regnetz_d32.ra3_in1k)|320 |84.022|96.866|27.58 |9.33 |37.08 | |[regnety_080.ra3_in1k](https://huggingface.co/timm/regnety_080.ra3_in1k)|288 |83.932|96.888|39.18 |13.22|29.69 | |[regnety_640.seer_ft_in1k](https://huggingface.co/timm/regnety_640.seer_ft_in1k)|384 |83.912|96.924|281.38 |188.47|124.83| |[regnety_160.swag_lc_in1k](https://huggingface.co/timm/regnety_160.swag_lc_in1k)|224 |83.778|97.286|83.59 |15.96|23.04 | |[regnetz_040_h.ra3_in1k](https://huggingface.co/timm/regnetz_040_h.ra3_in1k)|256 |83.776|96.704|28.94 |4.12 |24.29 | |[regnetv_064.ra3_in1k](https://huggingface.co/timm/regnetv_064.ra3_in1k)|288 |83.72 |96.75 |30.58 |10.55|27.11 | |[regnety_064.ra3_in1k](https://huggingface.co/timm/regnety_064.ra3_in1k)|288 |83.718|96.724|30.58 |10.56|27.11 | 
|[regnety_160.deit_in1k](https://huggingface.co/timm/regnety_160.deit_in1k)|288 |83.69 |96.778|83.59 |26.37|38.07 | |[regnetz_040.ra3_in1k](https://huggingface.co/timm/regnetz_040.ra3_in1k)|256 |83.62 |96.704|27.12 |4.06 |24.19 | |[regnetz_d8.ra3_in1k](https://huggingface.co/timm/regnetz_d8.ra3_in1k)|256 |83.438|96.776|23.37 |3.97 |23.74 | |[regnetz_d32.ra3_in1k](https://huggingface.co/timm/regnetz_d32.ra3_in1k)|256 |83.424|96.632|27.58 |5.98 |23.74 | |[regnetz_d8_evos.ch_in1k](https://huggingface.co/timm/regnetz_d8_evos.ch_in1k)|256 |83.36 |96.636|23.46 |4.5 |24.92 | |[regnety_320.seer_ft_in1k](https://huggingface.co/timm/regnety_320.seer_ft_in1k)|384 |83.35 |96.71 |145.05 |95.0 |88.87 | |[regnetv_040.ra3_in1k](https://huggingface.co/timm/regnetv_040.ra3_in1k)|288 |83.204|96.66 |20.64 |6.6 |20.3 | |[regnety_320.tv2_in1k](https://huggingface.co/timm/regnety_320.tv2_in1k)|224 |83.162|96.42 |145.05 |32.34|30.26 | |[regnety_080.ra3_in1k](https://huggingface.co/timm/regnety_080.ra3_in1k)|224 |83.16 |96.486|39.18 |8.0 |17.97 | |[regnetv_064.ra3_in1k](https://huggingface.co/timm/regnetv_064.ra3_in1k)|224 |83.108|96.458|30.58 |6.39 |16.41 | |[regnety_040.ra3_in1k](https://huggingface.co/timm/regnety_040.ra3_in1k)|288 |83.044|96.5 |20.65 |6.61 |20.3 | |[regnety_064.ra3_in1k](https://huggingface.co/timm/regnety_064.ra3_in1k)|224 |83.02 |96.292|30.58 |6.39 |16.41 | |[regnety_160.deit_in1k](https://huggingface.co/timm/regnety_160.deit_in1k)|224 |82.974|96.502|83.59 |15.96|23.04 | |[regnetx_320.tv2_in1k](https://huggingface.co/timm/regnetx_320.tv2_in1k)|224 |82.816|96.208|107.81 |31.81|36.3 | |[regnety_032.ra_in1k](https://huggingface.co/timm/regnety_032.ra_in1k)|288 |82.742|96.418|19.44 |5.29 |18.61 | |[regnety_160.tv2_in1k](https://huggingface.co/timm/regnety_160.tv2_in1k)|224 |82.634|96.22 |83.59 |15.96|23.04 | |[regnetz_c16_evos.ch_in1k](https://huggingface.co/timm/regnetz_c16_evos.ch_in1k)|320 |82.634|96.472|13.49 |3.86 |25.88 | |[regnety_080_tv.tv2_in1k](https://huggingface.co/timm/regnety_080_tv.tv2_in1k)|224 |82.592|96.246|39.38 |8.51 |19.73 | |[regnetx_160.tv2_in1k](https://huggingface.co/timm/regnetx_160.tv2_in1k)|224 |82.564|96.052|54.28 |15.99|25.52 | |[regnetz_c16.ra3_in1k](https://huggingface.co/timm/regnetz_c16.ra3_in1k)|320 |82.51 |96.358|13.46 |3.92 |25.88 | |[regnetv_040.ra3_in1k](https://huggingface.co/timm/regnetv_040.ra3_in1k)|224 |82.44 |96.198|20.64 |4.0 |12.29 | |[regnety_040.ra3_in1k](https://huggingface.co/timm/regnety_040.ra3_in1k)|224 |82.304|96.078|20.65 |4.0 |12.29 | |[regnetz_c16.ra3_in1k](https://huggingface.co/timm/regnetz_c16.ra3_in1k)|256 |82.16 |96.048|13.46 |2.51 |16.57 | |[regnetz_c16_evos.ch_in1k](https://huggingface.co/timm/regnetz_c16_evos.ch_in1k)|256 |81.936|96.15 |13.49 |2.48 |16.57 | |[regnety_032.ra_in1k](https://huggingface.co/timm/regnety_032.ra_in1k)|224 |81.924|95.988|19.44 |3.2 |11.26 | |[regnety_032.tv2_in1k](https://huggingface.co/timm/regnety_032.tv2_in1k)|224 |81.77 |95.842|19.44 |3.2 |11.26 | |[regnetx_080.tv2_in1k](https://huggingface.co/timm/regnetx_080.tv2_in1k)|224 |81.552|95.544|39.57 |8.02 |14.06 | |[regnetx_032.tv2_in1k](https://huggingface.co/timm/regnetx_032.tv2_in1k)|224 |80.924|95.27 |15.3 |3.2 |11.37 | |[regnety_320.pycls_in1k](https://huggingface.co/timm/regnety_320.pycls_in1k)|224 |80.804|95.246|145.05 |32.34|30.26 | |[regnetz_b16.ra3_in1k](https://huggingface.co/timm/regnetz_b16.ra3_in1k)|288 |80.712|95.47 |9.72 |2.39 |16.43 | |[regnety_016.tv2_in1k](https://huggingface.co/timm/regnety_016.tv2_in1k)|224 |80.66 |95.334|11.2 |1.63 
|8.04 | |[regnety_120.pycls_in1k](https://huggingface.co/timm/regnety_120.pycls_in1k)|224 |80.37 |95.12 |51.82 |12.14|21.38 | |[regnety_160.pycls_in1k](https://huggingface.co/timm/regnety_160.pycls_in1k)|224 |80.288|94.964|83.59 |15.96|23.04 | |[regnetx_320.pycls_in1k](https://huggingface.co/timm/regnetx_320.pycls_in1k)|224 |80.246|95.01 |107.81 |31.81|36.3 | |[regnety_080.pycls_in1k](https://huggingface.co/timm/regnety_080.pycls_in1k)|224 |79.882|94.834|39.18 |8.0 |17.97 | |[regnetz_b16.ra3_in1k](https://huggingface.co/timm/regnetz_b16.ra3_in1k)|224 |79.872|94.974|9.72 |1.45 |9.95 | |[regnetx_160.pycls_in1k](https://huggingface.co/timm/regnetx_160.pycls_in1k)|224 |79.862|94.828|54.28 |15.99|25.52 | |[regnety_064.pycls_in1k](https://huggingface.co/timm/regnety_064.pycls_in1k)|224 |79.716|94.772|30.58 |6.39 |16.41 | |[regnetx_120.pycls_in1k](https://huggingface.co/timm/regnetx_120.pycls_in1k)|224 |79.592|94.738|46.11 |12.13|21.37 | |[regnetx_016.tv2_in1k](https://huggingface.co/timm/regnetx_016.tv2_in1k)|224 |79.44 |94.772|9.19 |1.62 |7.93 | |[regnety_040.pycls_in1k](https://huggingface.co/timm/regnety_040.pycls_in1k)|224 |79.23 |94.654|20.65 |4.0 |12.29 | |[regnetx_080.pycls_in1k](https://huggingface.co/timm/regnetx_080.pycls_in1k)|224 |79.198|94.55 |39.57 |8.02 |14.06 | |[regnetx_064.pycls_in1k](https://huggingface.co/timm/regnetx_064.pycls_in1k)|224 |79.064|94.454|26.21 |6.49 |16.37 | |[regnety_032.pycls_in1k](https://huggingface.co/timm/regnety_032.pycls_in1k)|224 |78.884|94.412|19.44 |3.2 |11.26 | |[regnety_008_tv.tv2_in1k](https://huggingface.co/timm/regnety_008_tv.tv2_in1k)|224 |78.654|94.388|6.43 |0.84 |5.42 | |[regnetx_040.pycls_in1k](https://huggingface.co/timm/regnetx_040.pycls_in1k)|224 |78.482|94.24 |22.12 |3.99 |12.2 | |[regnetx_032.pycls_in1k](https://huggingface.co/timm/regnetx_032.pycls_in1k)|224 |78.178|94.08 |15.3 |3.2 |11.37 | |[regnety_016.pycls_in1k](https://huggingface.co/timm/regnety_016.pycls_in1k)|224 |77.862|93.73 |11.2 |1.63 |8.04 | |[regnetx_008.tv2_in1k](https://huggingface.co/timm/regnetx_008.tv2_in1k)|224 |77.302|93.672|7.26 |0.81 |5.15 | |[regnetx_016.pycls_in1k](https://huggingface.co/timm/regnetx_016.pycls_in1k)|224 |76.908|93.418|9.19 |1.62 |7.93 | |[regnety_008.pycls_in1k](https://huggingface.co/timm/regnety_008.pycls_in1k)|224 |76.296|93.05 |6.26 |0.81 |5.25 | |[regnety_004.tv2_in1k](https://huggingface.co/timm/regnety_004.tv2_in1k)|224 |75.592|92.712|4.34 |0.41 |3.89 | |[regnety_006.pycls_in1k](https://huggingface.co/timm/regnety_006.pycls_in1k)|224 |75.244|92.518|6.06 |0.61 |4.33 | |[regnetx_008.pycls_in1k](https://huggingface.co/timm/regnetx_008.pycls_in1k)|224 |75.042|92.342|7.26 |0.81 |5.15 | |[regnetx_004_tv.tv2_in1k](https://huggingface.co/timm/regnetx_004_tv.tv2_in1k)|224 |74.57 |92.184|5.5 |0.42 |3.17 | |[regnety_004.pycls_in1k](https://huggingface.co/timm/regnety_004.pycls_in1k)|224 |74.018|91.764|4.34 |0.41 |3.89 | |[regnetx_006.pycls_in1k](https://huggingface.co/timm/regnetx_006.pycls_in1k)|224 |73.862|91.67 |6.2 |0.61 |3.98 | |[regnetx_004.pycls_in1k](https://huggingface.co/timm/regnetx_004.pycls_in1k)|224 |72.38 |90.832|5.16 |0.4 |3.14 | |[regnety_002.pycls_in1k](https://huggingface.co/timm/regnety_002.pycls_in1k)|224 |70.282|89.534|3.16 |0.2 |2.17 | |[regnetx_002.pycls_in1k](https://huggingface.co/timm/regnetx_002.pycls_in1k)|224 |68.752|88.556|2.68 |0.2 |2.16 | ## Citation ```bibtex @InProceedings{Radosavovic2020, title = {Designing Network Design Spaces}, author = {Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming 
He and Piotr Doll{\'a}r},
  booktitle = {CVPR},
  year = {2020}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
15,536
[ [ -0.059173583984375, -0.01485443115234375, -0.01424407958984375, 0.035400390625, -0.033538818359375, -0.00814056396484375, -0.01104736328125, -0.037353515625, 0.07318115234375, 0.007205963134765625, -0.049468994140625, -0.037750244140625, -0.04937744140625, 0...
ProomptEngineer/pe-courtroomsketch-style
2023-09-11T15:42:39.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "license:other", "has_space", "region:us" ]
text-to-image
ProomptEngineer
null
null
ProomptEngineer/pe-courtroomsketch-style
2
679
diffusers
2023-09-11T15:42:34
---
license: other
tags:
- text-to-image
- stable-diffusion
- lora
- diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: PECourtRoomSketch
widget:
- text: PECourtRoomSketch
---

# PE CourtRoomSketch [Style]

![Image 0](2201917.jpeg)

## If you want to donate:
## [https://ko-fi.com/proomptengineer](https://ko-fi.com/proomptengineer)

## Courtroom Sketch Style.
## Can do some funny and absurd stuff.
## Weight from 0.8-1 recommended.

## Image examples for the model:

![Image 1](2201914.jpeg)
![Image 2](2201915.jpeg)
![Image 3](2201916.jpeg)
![Image 4](2201918.jpeg)
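For reference, a minimal `diffusers` sketch for applying this LoRA to the SDXL base model listed above; the subject in the prompt is illustrative, and the 0.9 scale follows the 0.8-1 weight recommendation.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")
pipe.load_lora_weights("ProomptEngineer/pe-courtroomsketch-style")

# "PECourtRoomSketch" is the trigger word from the card
image = pipe(
    "PECourtRoomSketch, a raccoon on trial",
    cross_attention_kwargs={"scale": 0.9},
).images[0]
image.save("courtroom_sketch.png")
```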
748
[ [ -0.005035400390625, -0.044158935546875, 0.04803466796875, 0.0117034912109375, -0.061767578125, -0.01549530029296875, 0.037109375, -0.008270263671875, 0.042877197265625, 0.0579833984375, -0.0280914306640625, 0.005519866943359375, -0.0369873046875, -0.00874328...
Rajaram1996/FacialEmoRecog
2023-02-15T23:08:51.000Z
[ "transformers", "pytorch", "vit", "image-classification", "image CLassification", "en", "dataset:Jeneral/fer2013", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
Rajaram1996
null
null
Rajaram1996/FacialEmoRecog
17
678
transformers
2022-03-02T23:29:04
---
license: mit
inference: true
pipeline_tag: image-classification
datasets:
- Jeneral/fer2013
language:
- en
metrics:
- accuracy
tags:
- image classification
- pytorch
---

# FacialEmoRecog

Create your own image classifier for **anything** by running this repo.

Reported validation accuracy: 0.9190.

## Example Images
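Since the card shows no usage code, here is a minimal inference sketch with the standard `transformers` image-classification pipeline; the image path is a placeholder.

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="Rajaram1996/FacialEmoRecog")
predictions = classifier("face.jpg")  # placeholder: path or URL to a face image
print(predictions)  # [{"label": ..., "score": ...}, ...]
```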
537
[ [ -0.00881195068359375, -0.0186767578125, 0.01128387451171875, 0.002155303955078125, -0.024200439453125, -0.01174163818359375, 0.02838134765625, -0.01558685302734375, 0.0269317626953125, 0.042236328125, -0.01513671875, -0.07843017578125, -0.06732177734375, -0....
Splend1dchan/canine-c-squad
2023-03-22T07:09:39.000Z
[ "transformers", "pytorch", "safetensors", "canine", "question-answering", "autotrain_compatible", "endpoints_compatible", "region:us" ]
question-answering
Splend1dchan
null
null
Splend1dchan/canine-c-squad
0
678
transformers
2022-04-08T14:16:41
Fine-tuned with:

```bash
python run_squad.py \
  --model_name_or_path google/canine-c \
  --do_train \
  --do_eval \
  --per_gpu_train_batch_size 1 \
  --per_gpu_eval_batch_size 1 \
  --gradient_accumulation_steps 128 \
  --learning_rate 3e-5 \
  --num_train_epochs 3 \
  --max_seq_length 1024 \
  --doc_stride 128 \
  --max_answer_length 240 \
  --output_dir canine-c-squad \
  --model_type bert
```

Model configuration:

```json
{
  "_name_or_path": "google/canine-c",
  "architectures": [
    "CanineForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 57344,
  "downsampling_rate": 4,
  "eos_token_id": 57345,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "local_transformer_stride": 128,
  "max_position_embeddings": 16384,
  "model_type": "canine",
  "num_attention_heads": 12,
  "num_hash_buckets": 16384,
  "num_hash_functions": 8,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "torch_dtype": "float32",
  "transformers_version": "4.19.0.dev0",
  "type_vocab_size": 16,
  "upsampling_kernel_size": 4,
  "use_cache": true
}
```

SQuAD results:

```json
{"exact": 58.893093661305585, "f1": 72.18823344945899}
```
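A minimal inference sketch, assuming a `transformers` version with CANINE support (>= 4.19, per the config above); the question and context are placeholders.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="Splend1dchan/canine-c-squad")
result = qa(
    question="What does CANINE operate on?",
    context="CANINE is a tokenization-free encoder that operates directly on characters.",
)
print(result)  # {"score": ..., "start": ..., "end": ..., "answer": ...}
```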
1,177
[ [ -0.036712646484375, -0.06097412109375, 0.003574371337890625, 0.0221099853515625, -0.0010967254638671875, 0.0182342529296875, -0.0163726806640625, -0.0018815994262695312, 0.012664794921875, 0.0018987655639648438, -0.07269287109375, -0.035369873046875, -0.03768920...
Writer/palmyra-small
2023-09-01T17:30:13.000Z
[ "transformers", "pytorch", "coreml", "safetensors", "gpt2", "text-generation", "text generation", "causal-lm", "Writer-data", "NeMo", "palmyra", "en", "dataset:English", "license:apache-2.0", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
Writer
null
null
Writer/palmyra-small
12
678
transformers
2023-02-03T13:14:37
---
language:
- en
datasets:
- English
tags:
- text generation
- pytorch
- causal-lm
- Writer-data
- NeMo
- palmyra
pipeline_tag: text-generation
library_name: transformers
license: apache-2.0
---

# Palmyra Small 128M

<style>
img {
 display: inline;
}
</style>

|[![Model architecture](https://img.shields.io/badge/Model%20Arch-Transformer%20Decoder-green)](#model-architecture)|[![Model size](https://img.shields.io/badge/Params-128M-green)](#model-architecture)|[![Language](https://img.shields.io/badge/Language-en--US-lightgrey#model-badge)](#datasets)

## Model Description

Palmyra Small was primarily pre-trained with English text. Note that there is still a trace amount of non-English data present within the training corpus that was accessed through CommonCrawl. A causal language modeling (CLM) objective was utilized during pretraining. Similar to GPT-3, Palmyra Small is a member of the same family of models that only contain a decoder. As a result, it was pre-trained utilizing the objective of self-supervised causal language modeling. Palmyra Small uses the prompts and general experimental setup from GPT-3 for its evaluation.

## Use case

Palmyra Small is the fastest of Writer’s LLMs and can perform important tasks such as text parsing, simple classification, address correction, and keyword recognition. Providing more context drives even better performance.

## Training data

Palmyra Small (128M) was trained on Writer’s custom dataset.

## Intended Use and Limitations

Palmyra Small learns an inner representation of the English language that can be used to extract features useful for downstream tasks. However, the model is best at what it was pre-trained for, which is generating text from a prompt.

### How to use

This model can be easily loaded using the `AutoModelForCausalLM` functionality:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")
tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")
```

### Limitations and Biases

Palmyra Small’s core functionality is to take a string of text and predict the next token. While language models are widely used for other tasks, there are many unknowns in this work. When prompting Palmyra, keep in mind that the next statistically likely token is not always the token that produces the most "accurate" text. Never rely on Palmyra Small to produce factually correct results.

Palmyra Small was trained on Writer’s custom data. As with all language models, it is difficult to predict how Palmyra Small will respond to specific prompts, and offensive content may appear unexpectedly. We recommend that the outputs be curated or filtered by humans before they are released, both to censor undesirable content and to improve the quality of the results.

## Citation and Related Information

To cite this model:

```
@misc{Palmyra,
  author = {Writer Engineering Team},
  title = {{Palmyra-base Parameter Autoregressive Language Model}},
  howpublished = {\url{https://dev.writer.com}},
  year = 2023,
  month = January
}
```
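Continuing the loading example above, a short generation sketch; the prompt and sampling settings are illustrative, not tuned.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")
tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")

inputs = tokenizer("The benefits of clean training data include", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```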
3,181
[ [ -0.007038116455078125, -0.0565185546875, 0.0278472900390625, 0.0180206298828125, -0.0159454345703125, -0.009246826171875, -0.033721923828125, -0.042388916015625, 0.002849578857421875, 0.03753662109375, -0.020843505859375, -0.039337158203125, -0.05108642578125, ...
amongey/egyptian-movie-vintage-posters
2023-08-21T04:09:30.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
amongey
null
null
amongey/egyptian-movie-vintage-posters
0
678
diffusers
2023-08-21T04:04:18
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Egyptian-Movie-vintage-posters Dreambooth model trained by amongey with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
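A minimal `diffusers` sketch for this DreamBooth model follows; note the card does not state the trained instance token, so the prompt below is only an illustrative guess and may need the actual concept token from the repo.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "amongey/egyptian-movie-vintage-posters",
    torch_dtype=torch.float16,
).to("cuda")

# Prompt is illustrative -- substitute the concept's trained instance token if needed
image = pipe("vintage egyptian movie poster of a detective in cairo").images[0]
image.save("poster.png")
```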
519
[ [ -0.03271484375, -0.047454833984375, 0.05120849609375, 0.01454925537109375, -0.0225677490234375, 0.02801513671875, 0.03173828125, -0.0163116455078125, 0.0560302734375, 0.0096435546875, -0.0204315185546875, -0.019805908203125, -0.050537109375, -0.0199127197265...
lberglund/test_20231012100010
2023-10-12T10:06:10.000Z
[ "diffusers", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "lora", "license:openrail++", "has_space", "region:us" ]
text-to-image
lberglund
null
null
lberglund/test_20231012100010
1
678
diffusers
2023-10-12T10:00:15
--- license: openrail++ base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: "a photo of a person showing <thumbs_up> thumbs up" tags: - stable-diffusion-xl - stable-diffusion-xl-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - lberglund/test_20231012100010 These are LoRA adaptation weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were trained on "a photo of a person showing <thumbs_up> thumbs up" using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. LoRA for the text encoder was enabled: False. Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
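A minimal sketch of applying these LoRA weights on top of the SDXL base model with diffusers; inference settings are left at defaults and are not prescribed by the card:

```python
# Hedged sketch: SDXL base model plus the LoRA weights from this repo.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("lberglund/test_20231012100010")

# The instance prompt below is the one stated in the card.
image = pipe("a photo of a person showing <thumbs_up> thumbs up").images[0]
image.save("thumbs_up.png")
```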
691
[ [ -0.026580810546875, -0.035064697265625, 0.0240020751953125, 0.00890350341796875, -0.03424072265625, 0.00254058837890625, 0.0218963623046875, -0.023773193359375, 0.068603515625, 0.0357666015625, -0.03607177734375, -0.022125244140625, -0.04547119140625, -0.005...
AVIIAX/ds8
2023-10-27T13:23:04.000Z
[ "diffusers", "General", "Anime", "Art", "Girl", "Photorealistic", "LandScapes", "Lykon", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
AVIIAX
null
null
AVIIAX/ds8
1
678
diffusers
2023-10-27T13:23:04
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image tags: - General - Anime - Art - Girl - Photorealistic - LandScapes - Lykon - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image --- # Dreamshaper 8 Original page: https://civitai.com/models/4384?modelVersionId=80261 Buy Lykon a coffee: https://snipfeed.co/lykon Sample and prompt: ![Sample](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/6ei2pnNw9CUS2vX-E1qWo.png) PRETTY CUTE GIRL BY ROSSDRAWS. An extradimensional creature buying donuts. Pixar animation.
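A minimal diffusers sketch reproducing the sample prompt above; all settings are left at pipeline defaults:

```python
# Hedged usage sketch for this checkpoint with diffusers defaults.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "AVIIAX/ds8", torch_dtype=torch.float16
).to("cuda")

prompt = ("PRETTY CUTE GIRL BY ROSSDRAWS. An extradimensional creature "
          "buying donuts. Pixar animation.")
image = pipe(prompt).images[0]
image.save("dreamshaper8.png")
```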
608
[ [ -0.008026123046875, -0.036651611328125, 0.04180908203125, 0.0193328857421875, -0.0197906494140625, 0.013702392578125, 0.00754547119140625, -0.0667724609375, 0.06988525390625, 0.049163818359375, -0.046875, -0.017547607421875, -0.035247802734375, 0.02090454101...
ubikpt/t5-small-finetuned-cnn
2022-06-30T10:07:16.000Z
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "summarization", "generated_from_trainer", "dataset:cnn_dailymail", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
summarization
ubikpt
null
null
ubikpt/t5-small-finetuned-cnn
0
677
transformers
2022-06-29T07:19:18
--- license: apache-2.0 tags: - summarization - generated_from_trainer datasets: - cnn_dailymail metrics: - rouge model-index: - name: t5-small-finetuned-cnn results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: cnn_dailymail type: cnn_dailymail args: 3.0.0 metrics: - name: Rouge1 type: rouge value: 33.2082 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-cnn This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the cnn_dailymail dataset. It achieves the following results on the evaluation set: - Loss: 1.8436 - Rouge1: 33.2082 - Rouge2: 16.798 - Rougel: 28.9573 - Rougelsum: 31.1044 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:| | 2.3793 | 1.0 | 359 | 1.8885 | 33.0321 | 16.7798 | 28.9367 | 30.9509 | | 2.1432 | 2.0 | 718 | 1.8481 | 33.1559 | 16.8557 | 29.015 | 31.1122 | | 2.0571 | 3.0 | 1077 | 1.8391 | 32.99 | 16.716 | 28.8118 | 30.9178 | | 2.0001 | 4.0 | 1436 | 1.8357 | 33.0543 | 16.6731 | 28.8375 | 30.9604 | | 1.9609 | 5.0 | 1795 | 1.8437 | 33.1019 | 16.7576 | 28.8669 | 31.001 | | 1.925 | 6.0 | 2154 | 1.8402 | 33.1388 | 16.7539 | 28.8887 | 31.0262 | | 1.9036 | 7.0 | 2513 | 1.8423 | 33.1825 | 16.759 | 28.9154 | 31.0656 | | 1.8821 | 8.0 | 2872 | 1.8436 | 33.2082 | 16.798 | 28.9573 | 31.1044 | ### Framework versions - Transformers 4.14.0 - Pytorch 1.5.0 - Datasets 2.3.2 - Tokenizers 0.10.3
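Since the usage sections above are placeholders, here is a hedged inference sketch; the article text is a placeholder, and whether the standard T5 `summarize:` prefix is needed is not stated in the card:

```python
# Hedged sketch: summarize a news article with the fine-tuned checkpoint.
from transformers import pipeline

summarizer = pipeline("summarization", model="ubikpt/t5-small-finetuned-cnn")

article = "..."  # placeholder: substitute a real news article
summary = summarizer(article, max_length=60, min_length=10)
print(summary[0]["summary_text"])
```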
2,403
[ [ -0.047821044921875, -0.038818359375, 0.00479888916015625, 0.00323486328125, -0.017242431640625, -0.025482177734375, -0.01114654541015625, -0.00986480712890625, 0.01541900634765625, 0.0264892578125, -0.0521240234375, -0.052337646484375, -0.054840087890625, -0...
devonho/detr-resnet-50_finetuned_cppe5
2023-01-31T06:02:36.000Z
[ "transformers", "pytorch", "tensorboard", "detr", "object-detection", "generated_from_trainer", "dataset:cppe-5", "license:apache-2.0", "endpoints_compatible", "region:us" ]
object-detection
devonho
null
null
devonho/detr-resnet-50_finetuned_cppe5
0
677
transformers
2023-01-29T06:54:11
--- license: apache-2.0 tags: - generated_from_trainer datasets: - cppe-5 model-index: - name: detr-resnet-50_finetuned_cppe5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # detr-resnet-50_finetuned_cppe5 This model is a fine-tuned version of [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) on the cppe-5 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu117 - Datasets 2.8.0 - Tokenizers 0.13.2
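As the card's usage sections are placeholders, a hedged inference sketch follows; the image path and the 0.5 score threshold are illustrative assumptions:

```python
# Hedged sketch: run the fine-tuned DETR checkpoint on one image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

ckpt = "devonho/detr-resnet-50_finetuned_cppe5"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForObjectDetection.from_pretrained(ckpt)

image = Image.open("example.jpg")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Map raw predictions back to image coordinates and filter by score.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=target_sizes
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```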
1,127
[ [ -0.04010009765625, -0.041595458984375, 0.0023403167724609375, 0.0128021240234375, -0.0235137939453125, -0.023895263671875, -0.01216888427734375, -0.0222625732421875, 0.01861572265625, 0.02337646484375, -0.0670166015625, -0.029449462890625, -0.035797119140625, ...
stablediffusionapi/anime-model-v2
2023-05-01T15:01:31.000Z
[ "diffusers", "stablediffusionapi.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
stablediffusionapi
null
null
stablediffusionapi/anime-model-v2
3
677
diffusers
2023-05-01T14:58:57
--- license: creativeml-openrail-m tags: - stablediffusionapi.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # Anime Model V2 API Inference ![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/4695483551682952954.png) ## Get API Key Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment needed. Replace the key in the code below and change **model_id** to "anime-model-v2". Coding in PHP/Node/Java etc.? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs) Model link: [View model](https://stablediffusionapi.com/models/anime-model-v2) Credits: [View credits](https://civitai.com/?query=Anime%20Model%20V2) View all models: [View Models](https://stablediffusionapi.com/models)

```python
import requests
import json

url = "https://stablediffusionapi.com/api/v3/dreambooth"

payload = json.dumps({
    "key": "",
    "model_id": "anime-model-v2",
    "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera",
    "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
    "width": "512",
    "height": "512",
    "samples": "1",
    "num_inference_steps": "30",
    "safety_checker": "no",
    "enhance_prompt": "yes",
    "seed": None,
    "guidance_scale": 7.5,
    "multi_lingual": "no",
    "panorama": "no",
    "self_attention": "no",
    "upscale": "no",
    "embeddings": "embeddings_model_id",
    "lora": "lora_model_id",
    "webhook": None,
    "track_id": None
})

headers = {
    'Content-Type': 'application/json'
}

response = requests.request("POST", url, headers=headers, data=payload)

print(response.text)
```

> Use this coupon code to get 25% off **DMGG0RBN**
2,427
[ [ -0.0247802734375, -0.05780029296875, 0.03509521484375, 0.0286712646484375, -0.034393310546875, -0.002986907958984375, 0.026336669921875, -0.03399658203125, 0.036590576171875, 0.0479736328125, -0.06475830078125, -0.053009033203125, -0.0291748046875, -0.007225...
KappaNeuro/1987-action-figure-playset-packaging
2023-09-14T02:25:45.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "style", "toy", "1987 action figure playset packaging", "license:other", "region:us", "has_space" ]
text-to-image
KappaNeuro
null
null
KappaNeuro/1987-action-figure-playset-packaging
1
677
diffusers
2023-09-14T02:25:41
--- license: other tags: - text-to-image - stable-diffusion - lora - diffusers - style - toy - 1987 action figure playset packaging base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: 1987 Action Figure Playset Packaging page widget: - text: 1987 Action Figure Playset Packaging - a vintage action figure of Sarah Ramsey in unopened package. Photorealstic - text: 1987 Action Figure Playset Packaging - 1975s packshot of superhero figure named 'Tiger Shrill' in his cardboard box packaging, box front is made of transparent cellophanev 5.1 - text: 1987 Action Figure Playset Packaging - A product photo of a 1970s japanese toy clear plastic robot action figure in front of packaging - text: 1987 Action Figure Playset Packaging - a 1990's robot toy with a picture of the galaxy in the chest in front of packaging - text: 1987 Action Figure Playset Packaging - toy action figure of a pink muscle man ghost in front of colorful cardboard packaging on plain white backdrop - text: 1987 Action Figure Playset Packaging - a 1970s kenner style grim reaper action figure with pink hair and a translucent cloak and a sickle standing next to its original cardboard packaging - text: 1987 Action Figure Playset Packaging - 1975s packshot of superhero figure named 'Tiger Shrill' standing in his cardboard box packaging, box front is made of transparent plastic - text: 1987 Action Figure Playset Packaging - a 1970s die cast kenner style action figure shaped like a neon colored robot ghost standing next to its cardboard and plastic packaging - text: 1987 Action Figure Playset Packaging - space packaging of a pink astronaut woman like dog action figure, 90s toy style box, box with plastic window and many space references - text: 1987 Action Figure Playset Packaging - 1975s superhero figure named 'Astronom' standing in his cardboard box packaging, box front is with with flat transparent cellophane --- # 1987 Action Figure Playset Packaging ![Image 0](2320823.jpeg) > 1987 Action Figure Playset Packaging - a vintage action figure of Sarah Ramsey in unopened package. Photorealstic <p>The packaging for action figure playsets in 1987 reflected the popular toy trends and marketing strategies of that era. Here are some key characteristics of 1987 action figure playset packaging:</p><p>1. Colorful and Eye-Catching Design: Packaging designs often featured vibrant colors, bold graphics, and dynamic action scenes to capture the attention of potential buyers. Bright hues and exciting imagery were used to evoke a sense of excitement and playfulness.</p><p>2. Action-Packed Illustrations: The packaging typically showcased illustrations of the action figures in dynamic poses, engaging in epic battles or adventure scenarios. These illustrations aimed to convey the playset's exciting possibilities and entice children with imaginative play possibilities.</p><p>3. Character and Story Information: The packaging would often include illustrations and descriptions of the action figures and their unique features, as well as background information about the characters and their role in a larger story or universe. This helped create a sense of narrative and allowed children to immerse themselves in a cohesive fictional world.</p><p>4. Playset Features and Accessories: The packaging would highlight the playset's features, such as special action features, interactive elements, or additional accessories included with the set. This information served to demonstrate the value and play potential of the product.</p><p>5. 
Branding and Logos: Packaging prominently displayed the toy line's branding and logos, ensuring brand recognition and association with other toys in the line. Consistent branding helped establish brand loyalty and familiarity among consumers.</p><p>6. Collectibility and Collectible Points: Some playset packaging included special collectible points or tokens that could be redeemed for additional toys or exclusive merchandise. These incentives encouraged repeat purchases and fostered a sense of value for collectors.</p><p>7. Safety Information and Age Recommendations: As with any toy packaging, safety information and age recommendations were included to ensure proper use and enjoyment of the playset.</p><p>Overall, 1987 action figure playset packaging aimed to captivate the attention of children and parents alike. Through vivid designs, engaging illustrations, and enticing features, the packaging conveyed the excitement and imaginative potential of the playsets, making them highly appealing to toy enthusiasts of that era.</p> ## Image examples for the model: ![Image 1](2320800.jpeg) > 1987 Action Figure Playset Packaging - 1975s packshot of superhero figure named 'Tiger Shrill' in his cardboard box packaging, box front is made of transparent cellophanev 5.1 ![Image 2](2320807.jpeg) > 1987 Action Figure Playset Packaging - A product photo of a 1970s japanese toy clear plastic robot action figure in front of packaging ![Image 3](2320809.jpeg) > 1987 Action Figure Playset Packaging - a 1990's robot toy with a picture of the galaxy in the chest in front of packaging ![Image 4](2320803.jpeg) > 1987 Action Figure Playset Packaging - toy action figure of a pink muscle man ghost in front of colorful cardboard packaging on plain white backdrop ![Image 5](2320798.jpeg) > 1987 Action Figure Playset Packaging - a 1970s kenner style grim reaper action figure with pink hair and a translucent cloak and a sickle standing next to its original cardboard packaging ![Image 6](2320801.jpeg) > 1987 Action Figure Playset Packaging - 1975s packshot of superhero figure named 'Tiger Shrill' standing in his cardboard box packaging, box front is made of transparent plastic ![Image 7](2320802.jpeg) > 1987 Action Figure Playset Packaging - a 1970s die cast kenner style action figure shaped like a neon colored robot ghost standing next to its cardboard and plastic packaging ![Image 8](2320808.jpeg) > 1987 Action Figure Playset Packaging - space packaging of a pink astronaut woman like dog action figure, 90s toy style box, box with plastic window and many space references ![Image 9](2320806.jpeg) > 1987 Action Figure Playset Packaging - 1975s superhero figure named 'Astronom' standing in his cardboard box packaging, box front is with with flat transparent cellophane
6,334
[ [ -0.020477294921875, 0.011383056640625, 0.0009393692016601562, 0.003787994384765625, -0.018798828125, 0.0023822784423828125, 0.043060302734375, -0.0162200927734375, 0.04730224609375, 0.0318603515625, -0.072265625, 0.006664276123046875, -0.032501220703125, -0....
nreimers/mMiniLMv2-L6-H384-distilled-from-XLMR-Large
2021-06-20T19:03:02.000Z
[ "transformers", "pytorch", "xlm-roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
nreimers
null
null
nreimers/mMiniLMv2-L6-H384-distilled-from-XLMR-Large
15
676
transformers
2022-03-02T23:29:05
# MiniLMv2 This is a MiniLMv2 model from: [https://github.com/microsoft/unilm](https://github.com/microsoft/unilm/tree/master/minilm)
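The card gives no usage snippet; a hedged sketch of loading the checkpoint as a plain encoder for feature extraction (the example sentence is a placeholder, and downstream heads are expected to be fine-tuned separately):

```python
# Hedged sketch: load the distilled model as an encoder and extract
# hidden states. This card does not document a specific downstream task.
import torch
from transformers import AutoModel, AutoTokenizer

name = "nreimers/mMiniLMv2-L6-H384-distilled-from-XLMR-Large"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModel.from_pretrained(name)

inputs = tokenizer("A multilingual example sentence.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, 384)
print(hidden.shape)
```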
133
[ [ 0.0016727447509765625, -0.0396728515625, 0.0091552734375, -0.016448974609375, -0.0255584716796875, 0.0247650146484375, 0.057891845703125, 0.00402069091796875, -0.005893707275390625, 0.035247802734375, -0.0882568359375, -0.0116119384765625, -0.01016998291015625, ...
facebook/esm-1b
2023-09-07T15:41:19.000Z
[ "transformers", "pytorch", "safetensors", "esm", "fill-mask", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
facebook
null
null
facebook/esm-1b
16
675
transformers
2022-03-02T23:29:05
This repository has been deprecated and will be deleted shortly. All ESM models have been moved to their official names to match their naming at the original FAIR repo. You can now find the ESM-1b model at [facebook/esm1b_t33_650M_UR50S](https://huggingface.co/facebook/esm1b_t33_650M_UR50S).
292
[ [ -0.037872314453125, -0.028900146484375, 0.03826904296875, 0.03289794921875, -0.0212554931640625, -0.02020263671875, 0.0205841064453125, -0.0307769775390625, 0.026641845703125, 0.043121337890625, -0.06646728515625, -0.032623291015625, -0.033355712890625, 0.00...
bipin/image-caption-generator
2023-07-05T08:26:49.000Z
[ "transformers", "pytorch", "safetensors", "vision-encoder-decoder", "image-captioning", "image-to-text", "endpoints_compatible", "has_space", "region:us" ]
image-to-text
bipin
null
null
bipin/image-caption-generator
11
675
transformers
2022-03-27T09:56:24
--- tags: - image-captioning - image-to-text model-index: - name: image-caption-generator results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Image-caption-generator This model is trained on [Flickr8k](https://www.kaggle.com/datasets/nunenuh/flickr8k) dataset to generate captions given an image. It achieves the following results on the evaluation set: - eval_loss: 0.2536 - eval_runtime: 25.369 - eval_samples_per_second: 63.818 - eval_steps_per_second: 8.002 - epoch: 4.0 - step: 3236 # Running the model using transformers library 1. Load the pre-trained model from the model hub ```python from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer import torch from PIL import Image model_name = "bipin/image-caption-generator" # load model model = VisionEncoderDecoderModel.from_pretrained(model_name) feature_extractor = ViTFeatureExtractor.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained("gpt2") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) ``` 2. Load the image for which the caption is to be generated ```python img_name = "flickr_data.jpg" img = Image.open(img_name) if img.mode != 'RGB': img = img.convert(mode="RGB") ``` 3. Pre-process the image ```python pixel_values = feature_extractor(images=[img], return_tensors="pt").pixel_values pixel_values = pixel_values.to(device) ``` 4. Generate the caption ```python max_length = 128 num_beams = 4 # get model prediction output_ids = model.generate(pixel_values, num_beams=num_beams, max_length=max_length) # decode the generated prediction preds = tokenizer.decode(output_ids[0], skip_special_tokens=True) print(preds) ``` ## Training procedure The procedure used to train this model can be found [here](https://bipinkrishnan.github.io/ml-recipe-book/image_captioning.html). ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Framework versions - Transformers 4.16.2 - Pytorch 1.9.1 - Datasets 1.18.4 - Tokenizers 0.11.6
2,515
[ [ -0.033782958984375, -0.0164337158203125, 0.0073699951171875, 0.0048675537109375, -0.044281005859375, -0.0220184326171875, 0.0093536376953125, -0.01447296142578125, -0.00959014892578125, 0.033111572265625, -0.04571533203125, -0.022857666015625, -0.05810546875, ...
timm/swinv2_large_window12_192.ms_in22k
2023-03-18T03:33:52.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-22k", "arxiv:2111.09883", "license:mit", "region:us" ]
image-classification
timm
null
null
timm/swinv2_large_window12_192.ms_in22k
0
675
timm
2023-03-18T03:32:25
--- tags: - image-classification - timm library_tag: timm license: mit datasets: - imagenet-22k --- # Model card for swinv2_large_window12_192.ms_in22k A Swin Transformer V2 image classification model. Pretrained on ImageNet-22k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 228.8 - GMACs: 26.2 - Activations (M): 56.5 - Image size: 192 x 192 - **Papers:** - Swin Transformer V2: Scaling Up Capacity and Resolution: https://arxiv.org/abs/2111.09883 - **Original:** https://github.com/microsoft/Swin-Transformer - **Dataset:** ImageNet-22k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch # needed for torch.topk below img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('swinv2_large_window12_192.ms_in22k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'swinv2_large_window12_192.ms_in22k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g. for swin_base_patch4_window7_224 (NHWC output) # torch.Size([1, 56, 56, 128]) # torch.Size([1, 28, 28, 256]) # torch.Size([1, 14, 14, 512]) # torch.Size([1, 7, 7, 1024]) # e.g. for swinv2_cr_small_ns_224 (NCHW output) # torch.Size([1, 96, 56, 56]) # torch.Size([1, 192, 28, 28]) # torch.Size([1, 384, 14, 14]) # torch.Size([1, 768, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'swinv2_large_window12_192.ms_in22k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled (i.e. a (batch_size, H, W, num_features) tensor for swin / swinv2 # or (batch_size, num_features, H, W) for swinv2_cr output = model.forward_head(output, pre_logits=True) # output is (batch_size, num_features) tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{liu2021swinv2, title={Swin Transformer V2: Scaling Up Capacity and Resolution}, author={Ze Liu and Han Hu and Yutong Lin and Zhuliang Yao and Zhenda Xie and Yixuan Wei and Jia Ning and Yue Cao and Zheng Zhang and Li Dong and Furu Wei and Baining Guo}, booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)}, year={2022} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
4,417
[ [ -0.0307159423828125, -0.0291290283203125, -0.00672149658203125, 0.0134735107421875, -0.0237579345703125, -0.03216552734375, -0.0228424072265625, -0.0400390625, -0.000553131103515625, 0.029266357421875, -0.035736083984375, -0.040374755859375, -0.046478271484375, ...
warp-ai/wuerstchen-prior-model-interpolated
2023-09-18T07:01:48.000Z
[ "diffusers", "arxiv:2306.00637", "arxiv:1910.09700", "license:mit", "diffusers:WuerstchenPrior", "region:us" ]
null
warp-ai
null
null
warp-ai/wuerstchen-prior-model-interpolated
2
675
diffusers
2023-09-03T19:45:43
--- license: mit --- <img src="https://cdn-uploads.huggingface.co/production/uploads/634cb5eefb80cc6bcaf63c3e/i-DYpDHw8Pwiy7QBKZVR5.jpeg" width=1500> ## Würstchen - Overview Würstchen is a diffusion model whose text-conditional model works in a highly compressed latent space of images. Why is this important? Compressing data can reduce computational costs for both training and inference by orders of magnitude. Training on 1024x1024 images is far more expensive than training on 32x32. Usually, other works make use of a relatively small compression, in the range of 4x - 8x spatial compression. Würstchen takes this to an extreme. Through its novel design, we achieve a 42x spatial compression. This was unseen before, because common methods fail to faithfully reconstruct detailed images after 16x spatial compression. Würstchen employs a two-stage compression, which we call Stage A and Stage B. Stage A is a VQGAN, and Stage B is a Diffusion Autoencoder (more details can be found in the [paper](https://arxiv.org/abs/2306.00637)). A third model, Stage C, is learned in that highly compressed latent space. This training requires a fraction of the compute used for current top-performing models, also allowing cheaper and faster inference. ## Würstchen - Prior The Prior is what we refer to as "Stage C". It is the text-conditional model, operating in the small latent space that Stage A and Stage B encode images into. During inference, its job is to generate the image latents given text. These image latents are then sent to Stages A & B to decode the latents into pixel space. ### Prior - Model - Interpolated The interpolated model is our current best Prior (Stage C) checkpoint. It is an interpolation between our [base model](https://huggingface.co/warp-ai/wuerstchen-prior-model-base) and the [finetuned model](https://huggingface.co/warp-ai/wuerstchen-prior-model-finetuned). We created this interpolation because the finetuned model became too artistic and often only generates artistic images. The base model, however, is usually very photorealistic. As a result, we combined both by interpolating their weights at 50%, taking the midpoint between the base and finetuned model (`0.5 * base_weights + 0.5 * finetuned_weights`). You can also interpolate the [base model](https://huggingface.co/warp-ai/wuerstchen-prior-model-base) and the [finetuned model](https://huggingface.co/warp-ai/wuerstchen-prior-model-finetuned) however you like, and may find an interpolation that fits your needs better than this checkpoint. ### Image Sizes Würstchen was trained on image resolutions between 1024x1024 & 1536x1536. We sometimes also observe good outputs at resolutions like 1024x2048. Feel free to try it out. We also observed that the Prior (Stage C) adapts extremely fast to new resolutions. So finetuning it at 2048x2048 should be computationally cheap. 
<img src="https://cdn-uploads.huggingface.co/production/uploads/634cb5eefb80cc6bcaf63c3e/IfVsUDcP15OY-5wyLYKnQ.jpeg" width=1000> ## How to run This pipeline should be run together with https://huggingface.co/warp-ai/wuerstchen: ```py import torch from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS device = "cuda" dtype = torch.float16 num_images_per_prompt = 2 prior_pipeline = WuerstchenPriorPipeline.from_pretrained( "warp-ai/wuerstchen-prior", torch_dtype=dtype ).to(device) decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained( "warp-ai/wuerstchen", torch_dtype=dtype ).to(device) caption = "Anthropomorphic cat dressed as a fire fighter" negative_prompt = "" prior_output = prior_pipeline( prompt=caption, height=1024, width=1536, timesteps=DEFAULT_STAGE_C_TIMESTEPS, negative_prompt=negative_prompt, guidance_scale=4.0, num_images_per_prompt=num_images_per_prompt, ) decoder_output = decoder_pipeline( image_embeddings=prior_output.image_embeddings, prompt=caption, negative_prompt=negative_prompt, guidance_scale=0.0, output_type="pil", ).images ``` ## Model Details - **Developed by:** Pablo Pernias, Dominic Rampas - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** MIT - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a Diffusion model in the style of Stage C from the [Würstchen paper](https://arxiv.org/abs/2306.00637) that uses a fixed, pretrained text encoder ([CLIP ViT-bigG/14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). - **Resources for more information:** [GitHub Repository](https://github.com/dome272/Wuerstchen), [Paper](https://arxiv.org/abs/2306.00637). - **Cite as:** @misc{pernias2023wuerstchen, title={Wuerstchen: Efficient Pretraining of Text-to-Image Models}, author={Pablo Pernias and Dominic Rampas and Marc Aubreville}, year={2023}, eprint={2306.00637}, archivePrefix={arXiv}, primaryClass={cs.CV} } ## Environmental Impact **Würstchen v2** **Estimated Emissions** Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact. - **Hardware Type:** A100 PCIe 40GB - **Hours used:** 24602 - **Cloud Provider:** AWS - **Compute Region:** US-east - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 2275.68 kg CO2 eq.
5,715
[ [ -0.049957275390625, -0.0496826171875, 0.0287933349609375, 0.01456451416015625, -0.022430419921875, -0.032470703125, -0.00955963134765625, -0.0251617431640625, -0.009490966796875, 0.0232086181640625, -0.051788330078125, -0.029541015625, -0.0518798828125, -0.0...
facebook/wav2vec2-large-xlsr-53-italian
2021-07-06T02:53:33.000Z
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "speech", "audio", "it", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
facebook
null
null
facebook/wav2vec2-large-xlsr-53-italian
4
674
transformers
2022-03-02T23:29:05
--- language: it datasets: - common_voice tags: - speech - audio - automatic-speech-recognition license: apache-2.0 --- ## Evaluation on Common Voice IT Test ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys model_name = "facebook/wav2vec2-large-xlsr-53-italian" device = "cuda" chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"]' # noqa: W605 model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(model_name) ds = load_dataset("common_voice", "it", split="test", data_dir="./cv-corpus-6.1-2020-12-11") resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def map_to_array(batch): speech, _ = torchaudio.load(batch["path"]) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits pred_ids = torch.argmax(logits, dim=-1) batch["predicted"] = processor.batch_decode(pred_ids) batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys())) wer = load_metric("wer") print(wer.compute(predictions=result["predicted"], references=result["target"])) ``` **Result**: 22.1 %
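For transcribing a single file, as opposed to the batch evaluation above, a minimal hedged sketch (the file path is a placeholder; audio must be resampled to 16 kHz):

```python
# Hedged sketch: transcribe one audio file with this checkpoint.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

name = "facebook/wav2vec2-large-xlsr-53-italian"
model = Wav2Vec2ForCTC.from_pretrained(name)
processor = Wav2Vec2Processor.from_pretrained(name)

speech, sr = torchaudio.load("sample.wav")  # placeholder path
speech = torchaudio.transforms.Resample(sr, 16_000)(speech).squeeze(0)

inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])
```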
1,850
[ [ -0.030853271484375, -0.052490234375, 0.00943756103515625, 0.0255126953125, -0.0183258056640625, -0.004581451416015625, -0.03594970703125, -0.01398468017578125, 0.0116424560546875, 0.0281829833984375, -0.052764892578125, -0.056915283203125, -0.045806884765625, ...
rinna/japanese-gpt-neox-3.6b-instruction-sft-v2
2023-06-09T04:57:39.000Z
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "ja", "lm", "nlp", "dataset:Anthropic/hh-rlhf", "dataset:stanfordnlp/SHP", "license:mit", "text-generation-inference", "region:us" ]
text-generation
rinna
null
null
rinna/japanese-gpt-neox-3.6b-instruction-sft-v2
22
674
transformers
2023-05-30T01:50:25
--- language: ja thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png tags: - ja - gpt_neox - text-generation - lm - nlp license: mit datasets: - Anthropic/hh-rlhf - stanfordnlp/SHP inference: false --- # japanese-gpt-neox-3.6b-instruction-sft-v2 ![rinna-icon](./rinna.png) # Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters. The model is based on [`rinna/japanese-gpt-neox-3.6b`](https://huggingface.co/rinna/japanese-gpt-neox-3.6b) and has been finetuned to serve as an instruction-following conversational agent. This model slightly differs from the previous SFT model [`rinna/japanese-gpt-neox-3.6b-instruction-sft`](https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-sft), where a different data split is used for training. * **Model architecture** A 36-layer, 2816-hidden-size transformer-based language model. * **SFT vs. previous SFT evaluation** We conducted a ChatGPT-based automated evaluation on 100 prompts to assess the performance difference between this SFT model and the previous SFT model. | [this SFT](https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-sft-v2) vs. [previous SFT](https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-sft) | win | tie | loss | | :---: | :---: | :---: | :---: | | ChatGPT auto. evaluation | **55**% | 0% | 45% | * **Finetuning** The finetuning data is a subset of the following datasets and has been translated into Japanese. * [Anthropic HH RLHF data](https://huggingface.co/datasets/Anthropic/hh-rlhf) * [FLAN Instruction Tuning data](https://github.com/google-research/FLAN) * [Stanford Human Preferences Dataset](https://huggingface.co/datasets/stanfordnlp/SHP) The data will **not** be released. * **Model Series** | Variant | Link | | :-- | :--| | 3.6B PPO | https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-ppo | | 3.6B SFT-v2 | https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-sft-v2 | | 3.6B SFT | https://huggingface.co/rinna/japanese-gpt-neox-3.6b-instruction-sft | | 3.6B pretrained | https://huggingface.co/rinna/japanese-gpt-neox-3.6b | * **Authors** [Tianyu Zhao](https://huggingface.co/tianyuz) and [Kei Sawada](https://huggingface.co/keisawada) # I/O Format A special format has been adopted to construct inputs. * An input prompt is formatted as a conversation between `ユーザー` and `システム`. * Each input utterance consists of (1) its speaker (`"ユーザー"` or `"システム"`), (2) a colon (`":"`), (3) a whitespace (`" "`), and (4) utterance text (e.g. `"世界で一番高い山は?"`). * The input prompt should end with `"システム: "` to prompt the model to generate a response. * Since the model's tokenizer does not recognize `"\n"`, a special newline symbol `"<NL>"` is used instead. * All the newlines in input and output utterances should be replaced with `"<NL>"`. * All the utterances in the input prompt should be separated by `"<NL>"`. The following is an example of constructing an input from a conversation. ~~~python prompt = [ { "speaker": "ユーザー", "text": "コンタクトレンズを慣れるにはどうすればよいですか?" }, { "speaker": "システム", "text": "これについて具体的に説明していただけますか?何が難しいのでしょうか?" }, { "speaker": "ユーザー", "text": "目が痛いのです。" }, { "speaker": "システム", "text": "分かりました、コンタクトレンズをつけると目がかゆくなるということですね。思った以上にレンズを外す必要があるでしょうか?" 
}, { "speaker": "ユーザー", "text": "いえ、レンズは外しませんが、目が赤くなるんです。" } ] prompt = [ f"{uttr['speaker']}: {uttr['text']}" for uttr in prompt ] prompt = "<NL>".join(prompt) prompt = ( prompt + "<NL>" + "システム: " ) print(prompt) # "ユーザー: コンタクトレンズを慣れるにはどうすればよいですか?<NL>システム: これについて具体的に説明していただけますか?何が難しいのでしょうか?<NL>ユーザー: 目が痛いのです。<NL>システム: 分かりました、コンタクトレンズをつけると目がかゆくなるということですね。思った以上にレンズを外す必要があるでしょうか?<NL>ユーザー: いえ、レンズは外しませんが、目が赤くなるんです。<NL>システム: " ~~~ # How to use the model ~~~~python import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt-neox-3.6b-instruction-sft-v2", use_fast=False) model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt-neox-3.6b-instruction-sft-v2") if torch.cuda.is_available(): model = model.to("cuda") token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") with torch.no_grad(): output_ids = model.generate( token_ids.to(model.device), do_sample=True, max_new_tokens=128, temperature=0.7, repetition_penalty=1.1, pad_token_id=tokenizer.pad_token_id, bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id ) output = tokenizer.decode(output_ids.tolist()[0][token_ids.size(1):]) output = output.replace("<NL>", "\n") print(output) """わかりました。まずは、コンタクトレンズを長時間着用することによる目の乾燥を防ぐことができます。また、毎日同じ時間帯にコンタクトレンズを着用してみることもできます。そして、コンタクトレンズが目に合わないような場合は、新しいものを試してみる必要があります。</s>""" ~~~~ # Tokenization The model uses a [sentencepiece](https://github.com/google/sentencepiece)-based tokenizer. * The tokenizer has a vocabulary size of 32,000. * It uses sentencepiece's byte fallback feature to decompose unknown text pieces into UTF-8 byte pieces and to avoid producing `<UNK>` tokens. * sentencepiece's `--add_dummy_prefix` option was turned off so that a leading whitespace will not be prepended automatically. ~~~ print(tokenizer.tokenize("吾輩は猫である")) # ['吾', '輩', 'は', '猫', 'である'] # instead of ['▁', '吾', '輩', 'は', '猫', 'である'] as in rinna/japanese-gpt-1b ~~~ * sentencepiece's `--remove_extra_whitespaces` option was turned off so that leading, trailing, and duplicate whitespaces are reserved. ~~~ print(tokenizer.tokenize(" 吾輩は 猫である ")) # ['▁', '▁', '吾', '輩', 'は', '▁', '▁', '猫', 'である', '▁', '▁', '▁'] # instead of ['▁', '吾', '輩', 'は', '▁猫', 'である'] as in rinna/japanese-gpt-1b ~~~ * Don't forget to set `use_fast=False` to make the above features function correctly. ~~~ good_tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt-neox-3.6b", use_fast=False) bad_tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt-neox-3.6b") print(good_tokenizer.decode(good_tokenizer.encode("გამარჯობა 吾輩は 猫である "))) # 'გამარჯობა 吾輩は 猫である </s>' print(bad_tokenizer.decode(bad_tokenizer.encode("გამარჯობა 吾輩は 猫である "))) # 'გამარ[UNK]ობა 吾輩は 猫である </s>' ~~~ # Licenese [The MIT license](https://opensource.org/licenses/MIT)
6,554
[ [ -0.0238189697265625, -0.07647705078125, 0.02978515625, 0.01168060302734375, -0.025421142578125, -0.0099029541015625, -0.015960693359375, -0.0335693359375, 0.0309295654296875, 0.026458740234375, -0.05291748046875, -0.036865234375, -0.03350830078125, 0.0228881...
NbAiLab/nb-whisper-large-beta
2023-07-24T18:05:01.000Z
[ "transformers", "pytorch", "tf", "jax", "safetensors", "whisper", "automatic-speech-recognition", "audio", "asr", "hf-asr-leaderboard", "no", "nb", "nn", "en", "dataset:NbAiLab/ncc_speech", "dataset:NbAiLab/NST", "dataset:NbAiLab/NPSC", "arxiv:2212.04356", "arxiv:1910.09700", "...
automatic-speech-recognition
NbAiLab
null
null
NbAiLab/nb-whisper-large-beta
5
674
transformers
2023-07-23T19:31:02
--- license: cc-by-4.0 language: - 'no' - nb - nn - en datasets: - NbAiLab/ncc_speech - NbAiLab/NST - NbAiLab/NPSC tags: - audio - asr - automatic-speech-recognition - hf-asr-leaderboard metrics: - wer - cer library_name: transformers pipeline_tag: automatic-speech-recognition widget: - src: https://datasets-server.huggingface.co/assets/google/fleurs/--/nb_no/train/1/audio/audio.mp3 example_title: FLEURS sample 1 - src: https://datasets-server.huggingface.co/assets/google/fleurs/--/nb_no/train/4/audio/audio.mp3 example_title: FLEURS sample 2 --- # NB-Whisper Large (beta) This is a **_public beta_** of the Norwegian NB-Whisper Large model released by the National Library of Norway. NB-Whisper is a series of models for automatic speech recognition (ASR) and speech translation, building upon the foundation laid by [OpenAI's Whisper](https://arxiv.org/abs/2212.04356). All models are trained on 20,000 hours of labeled data. <center> <figure> <video controls> <source src="https://huggingface.co/NbAiLab/nb-whisper-small-beta/resolve/main/king.mp4" type="video/mp4"> Your browser does not support the video tag. </video> <figcaption><a href="https://www.royalcourt.no/tale.html?tid=137662&sek=28409&scope=27248" target="_blank">Speech given by His Majesty The King of Norway at the garden party hosted by Their Majesties The King and Queen at the Palace Park on 1 September 2016.</a>Transcribed using the Small model.</figcaption> </figure> </center> ## Model Details NB-Whisper models will be available in five different sizes: | Model Size | Parameters | Availability | |------------|------------|--------------| | tiny | 39M | [NB-Whisper Tiny (beta)](https://huggingface.co/NbAiLab/nb-whisper-tiny-beta) | | base | 74M | [NB-Whisper Base (beta)](https://huggingface.co/NbAiLab/nb-whisper-base-beta) | | small | 244M | [NB-Whisper Small (beta)](https://huggingface.co/NbAiLab/nb-whisper-small-beta) | | medium | 769M | [NB-Whisper Medium (beta)](https://huggingface.co/NbAiLab/nb-whisper-medium-beta) | | large | 1550M | [NB-Whisper Large (beta)](https://huggingface.co/NbAiLab/nb-whisper-large-beta) | An official release of NB-Whisper models is planned for the Fall 2023. Please refer to the OpenAI Whisper model card for more details about the backbone model. ### Model Description - **Developed by:** [NB AI-Lab](https://ai.nb.no/) - **Shared by:** [NB AI-Lab](https://ai.nb.no/) - **Model type:** `whisper` - **Language(s) (NLP):** Norwegian, Norwegian Bokmål, Norwegian Nynorsk, English - **License:** [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) - **Finetuned from model:** [openai/whisper-small](https://huggingface.co/openai/whisper-small) ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/NbAiLab/nb-whisper/ - **Paper:** _Coming soon_ - **Demo:** http://ai.nb.no/demo/nb-whisper ## Uses ### Direct Use This is a **_public beta_** release. The models published in this repository are intended for a generalist purpose and are available to third parties. ### Downstream Use For Norwegian transcriptions we are confident that this public beta will give you State-of-the-Art results compared to currently available Norwegian ASR models of the same size. However, it is still known to show some hallucinations, as well as a tendency to drop part of the transcript from time to time. Please also note that the transcripts are typically not word by word. 
Spoken language and written language are often very different, and the model aims to "translate" spoken utterances into grammatically correct written sentences. We strongly believe that the best way to understand these models is to try them yourself. A significant part of the training material comes from TV subtitles. Subtitles often shorten the content to make it easier to read. Typically, non-essential parts of the utterance can also be dropped. In some cases this is a desired ability; in other cases it is undesired. The final release of these models will provide a mechanism to control for this behaviour. ## Bias, Risks, and Limitations This is a public beta that is not intended for production. Production use without adequate assessment of risks and mitigation may be considered irresponsible or harmful. These models may have bias and/or any other undesirable distortions. When third parties deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence. In no event shall the owner of the models (The National Library of Norway) be liable for any results arising from the use made by third parties of these models. ## How to Get Started with the Model Use the code below to get started with the model. ```python from transformers import pipeline asr = pipeline( "automatic-speech-recognition", "NbAiLab/nb-whisper-large-beta" ) asr( "audio.mp3", generate_kwargs={'task': 'transcribe', 'language': 'no'} ) # {'text': ' Så mange anga kører seg i så viktig sak, så vi får du kører det tilbake med. Om kabaret gudam i at vi skal hjælge. Kør seg vi gjør en uda? Nei noe skal å abelistera sonvorne skrifer. Det er sak, så kjent det bare handling i samtatsen til bargører. Trudet første lask. På den å først så å køre og en gange samme, og så får vi gjør å vorte vorte vorte når vi kjent dit.'} ``` Timestamps can also be retrieved by passing in the right parameter. ```python asr( "audio.mp3", generate_kwargs={'task': 'transcribe', 'language': 'no'}, return_timestamps=True, ) # {'text': ' at så mange angar til seg så viktig sak, så vi får jo kjølget klare tilbakemeldingen om hva valget dem gjør at vi skal gjøre. Hva skjer vi gjøre nå da? Nei, nå skal jo administrationen vår skrivferdige sak, så kjem til behandling i samfærdshetshøyvalget, tror det første # r. Først så kan vi ta og henge dem kjemme, og så får vi gjøre vårt valget når vi kommer dit.', # 'chunks': [{'timestamp': (0.0, 5.34), # 'text': ' at så mange angar til seg så viktig sak, så vi får jo kjølget klare tilbakemeldingen om'}, # {'timestamp': (5.34, 8.64), # 'text': ' hva valget dem gjør at vi skal gjøre.'}, # {'timestamp': (8.64, 10.64), 'text': ' Hva skjer vi gjøre nå da?'}, # {'timestamp': (10.64, 17.44), # 'text': ' Nei, nå skal jo administrationen vår skrivferdige sak, så kjem til behandling i samfærdshetshøyvalget,'}, # {'timestamp': (17.44, 19.44), 'text': ' tror det første år.'}, # {'timestamp': (19.44, 23.94), # 'text': ' Først så kan vi ta og henge dem kjemme, og så får vi gjøre vårt valget når vi kommer dit.'}]} ``` ## Training Data Training data comes from Språkbanken and the digital collection at the National Library of Norway. 
Training data includes: - NST Norwegian ASR Database (16 kHz), and its corresponding dataset - Transcribed speeches from the Norwegian Parliament produced by Språkbanken - TV broadcast (NRK) subtitles (NLN digital collection) - Audiobooks (NLN digital collection) ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** TPUv4 - **Hours used:** 1,536 - **Cloud Provider:** Google Cloud - **Compute Region:** `us-central1` - **Carbon Emitted:** Total emissions are estimated to be 247.77 kgCO₂, of which 100 percent was directly offset by the cloud provider. #### Software The model is trained using Jax/Flax. The final model is converted to PyTorch, TensorFlow, whisper.cpp, and ONNX. Please tell us if you would like future models to be converted to other formats. ## Citation & Contributors The development of this model was part of the contributors' professional roles at the National Library of Norway, under the _NoSTram_ project led by _Per Egil Kummervold (PEK)_. The Jax code, dataset loaders, and training scripts were collectively designed by _Javier de la Rosa (JdlR)_, _Freddy Wetjen (FW)_, _Rolv-Arild Braaten (RAB)_, and _PEK_. Primary dataset curation was handled by _FW_, _RAB_, and _PEK_, while _JdlR_ and _PEK_ crafted the documentation. The project was completed under the umbrella of AiLab, directed by _Svein Arne Brygfjeld_. All contributors played a part in shaping the optimal training strategy for the Norwegian ASR model based on the Whisper architecture. _A paper detailing our process and findings is underway!_ ## Acknowledgements Thanks to [Google TPU Research Cloud](https://sites.research.google/trc/about/) for supporting this project with extensive training resources. Thanks to Google Cloud for supporting us with credits for translating large parts of the corpus. A special thanks to [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi) for providing thorough technical advice in debugging and with the work of getting this to train on Google TPUs. A special thanks to Per Erik Solberg at Språkbanken for the collaboration with regard to the Stortinget corpus. ## Contact We are releasing this ASR Whisper model as a public beta to gather constructive feedback on its performance. Please do not hesitate to contact us with any experiences, insights, or suggestions that you may have. Your input is invaluable in helping us to improve the model and ensure that it effectively serves the needs of users. Whether you have technical concerns, usability suggestions, or ideas for future enhancements, we welcome your input. Thank you for participating in this critical stage of our model's development. If you intend to incorporate this model into your research, we kindly request that you reach out to us. We can provide you with the most current status of our upcoming paper, which you can cite to acknowledge and provide context for the work done on this model. Please use this email as the main contact point, it is read by the entire team: <a rel="noopener nofollow" href="mailto:ailab@nb.no">ailab@nb.no</a>
10,509
[ [ -0.0236968994140625, -0.04083251953125, 0.0158538818359375, 0.0285186767578125, -0.0236968994140625, -0.01210784912109375, -0.0209808349609375, -0.046112060546875, 0.0268096923828125, 0.04217529296875, -0.04632568359375, -0.044158935546875, -0.044586181640625, ...
CiroN2022/overprint-effect
2023-08-23T11:52:47.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "license:other", "has_space", "region:us" ]
text-to-image
CiroN2022
null
null
CiroN2022/overprint-effect
1
674
diffusers
2023-08-23T11:52:44
--- license: other tags: - text-to-image - stable-diffusion - lora - diffusers base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: overprint_effect widget: - text: overprint_effect --- # Overprint Effect ![Image 0](2139474.jpeg) ## Image examples for the model: ![Image 1](2139538.jpeg) ![Image 2](2139472.jpeg) ![Image 3](2139475.jpeg) ![Image 4](2139533.jpeg) ![Image 5](2139477.jpeg) ![Image 6](2139479.jpeg) ![Image 7](2139543.jpeg) ![Image 8](2139470.jpeg) ![Image 9](2139481.jpeg)
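The card ships no code; a hedged sketch of applying this LoRA on the SDXL base model named in the metadata, using the stated `overprint_effect` trigger word (the rest of the prompt is illustrative, and the weight file name may need to be passed explicitly):

```python
# Hedged sketch: SDXL base plus the overprint-effect LoRA from this repo.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
# If loading fails, pass weight_name="<file>.safetensors" matching the
# actual LoRA file in the repo (not stated in the card).
pipe.load_lora_weights("CiroN2022/overprint-effect")

# "overprint_effect" is the trigger word given in the card metadata.
image = pipe("overprint_effect poster of a lighthouse").images[0]
image.save("overprint.png")
```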
526
[ [ -0.0350341796875, -0.0204010009765625, 0.0543212890625, 0.020233154296875, -0.044158935546875, -0.0126953125, 0.053436279296875, -0.0277099609375, 0.0179901123046875, 0.0703125, -0.02423095703125, -0.022430419921875, -0.044158935546875, -0.022552490234375, ...
jeffwan/mmarco-mMiniLMv2-L12-H384-v1
2023-09-15T18:17:35.000Z
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "en", "ar", "zh", "nl", "fr", "de", "hi", "in", "it", "ja", "pt", "ru", "es", "vi", "multilingual", "dataset:unicamp-dl/mmarco", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
jeffwan
null
null
jeffwan/mmarco-mMiniLMv2-L12-H384-v1
0
674
transformers
2023-09-15T17:49:50
--- license: apache-2.0 language: - en - ar - zh - nl - fr - de - hi - in - it - ja - pt - ru - es - vi - multilingual datasets: - unicamp-dl/mmarco --- # Cross-Encoder for multilingual MS Marco This model was trained on the [MMARCO](https://hf.co/unicamp-dl/mmarco) dataset, a machine-translated version of MS MARCO produced with Google Translate, covering 14 languages. In our experiments, we observed that it also performs well for other languages. As a base model, we used the [multilingual MiniLMv2](https://huggingface.co/nreimers/mMiniLMv2-L12-H384-distilled-from-XLMR-Large) model. The model can be used for Information Retrieval: given a query, score the query together with all candidate passages (e.g. retrieved with ElasticSearch), then sort the passages in decreasing order. See [SBERT.net Retrieve & Re-rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html) for more details. The training code is available here: [SBERT.net Training MS Marco](https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/ms_marco) ## Usage with SentenceTransformers Usage becomes easy when you have [SentenceTransformers](https://www.sbert.net/) installed. Then you can use the pre-trained model like this: ```python from sentence_transformers import CrossEncoder model = CrossEncoder('jeffwan/mmarco-mMiniLMv2-L12-H384-v1') scores = model.predict([('Query', 'Paragraph1'), ('Query', 'Paragraph2'), ('Query', 'Paragraph3')]) ``` ## Usage with Transformers ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch model = AutoModelForSequenceClassification.from_pretrained('jeffwan/mmarco-mMiniLMv2-L12-H384-v1') tokenizer = AutoTokenizer.from_pretrained('jeffwan/mmarco-mMiniLMv2-L12-H384-v1') features = tokenizer(['How many people live in Berlin?', 'How many people live in Berlin?'], ['Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.', 'New York City is famous for the Metropolitan Museum of Art.'], padding=True, truncation=True, return_tensors="pt") model.eval() with torch.no_grad(): scores = model(**features).logits print(scores) ```
2,130
[ [ -0.019256591796875, -0.039215087890625, 0.0269012451171875, 0.027252197265625, -0.01090240478515625, -0.007053375244140625, -0.0252838134765625, -0.0243072509765625, 0.0173187255859375, 0.031768798828125, -0.03155517578125, -0.04345703125, -0.051727294921875, ...
voidful/dpr-ctx_encoder-bert-base-multilingual
2021-02-21T09:00:44.000Z
[ "transformers", "pytorch", "dpr", "multilingual", "dataset:NQ", "dataset:Trivia", "dataset:SQuAD", "dataset:MLQA", "dataset:DRCD", "arxiv:2004.04906", "endpoints_compatible", "region:us" ]
null
voidful
null
null
voidful/dpr-ctx_encoder-bert-base-multilingual
6
673
transformers
2022-03-02T23:29:05
---
language: multilingual
datasets:
- NQ
- Trivia
- SQuAD
- MLQA
- DRCD
---

# dpr-ctx_encoder-bert-base-multilingual

## Description

Multilingual DPR model based on bert-base-multilingual-cased.

[DPR model](https://arxiv.org/abs/2004.04906)
[DPR repo](https://github.com/facebookresearch/DPR)

## Data

1. [NQ](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py)
2. [Trivia](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py)
3. [SQuAD](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py)
4. [DRCD*](https://github.com/DRCKnowledgeTeam/DRCD)
5. [MLQA*](https://github.com/facebookresearch/MLQA)

`question pairs for train`: 644,217
`question pairs for dev`: 73,710

*DRCD and MLQA are converted using the [squad_to_dpr.py](https://github.com/deepset-ai/haystack/blob/master/haystack/retriever/squad_to_dpr.py) script from haystack

## Training Script

I use the script from [haystack](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial9_DPR_training.ipynb)

## Usage

```python
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer

tokenizer = DPRContextEncoderTokenizer.from_pretrained('voidful/dpr-ctx_encoder-bert-base-multilingual')
model = DPRContextEncoder.from_pretrained('voidful/dpr-ctx_encoder-bert-base-multilingual')
input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"]
embeddings = model(input_ids).pooler_output
```

Follow the tutorial from haystack: [Better Retrievers via "Dense Passage Retrieval"](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb)

```python
from haystack.retriever.dense import DensePassageRetriever

retriever = DensePassageRetriever(document_store=document_store,
                                  query_embedding_model="voidful/dpr-question_encoder-bert-base-multilingual",
                                  passage_embedding_model="voidful/dpr-ctx_encoder-bert-base-multilingual",
                                  max_seq_len_query=64,
                                  max_seq_len_passage=256,
                                  batch_size=16,
                                  use_gpu=True,
                                  embed_title=True,
                                  use_fast_tokenizers=True)
```
2,375
[ [ -0.018463134765625, -0.052490234375, 0.01947021484375, 0.028167724609375, -0.01377105712890625, 0.00765228271484375, -0.035186767578125, -0.007495880126953125, 0.01366424560546875, 0.00653076171875, -0.051025390625, -0.0396728515625, -0.032684326171875, 0.01...
AhmedSSoliman/MarianCG-CoNaLa-Large
2023-07-30T11:58:54.000Z
[ "transformers", "pytorch", "safetensors", "marian", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text2text-generation
AhmedSSoliman
null
null
AhmedSSoliman/MarianCG-CoNaLa-Large
0
673
transformers
2022-05-24T22:50:16
---
widget:
- text: "create array containing the maximum value of respective elements of array `[2, 3, 4]` and array `[1, 5, 2]`"
- text: "check if all elements in list `mylist` are identical"
- text: "enable debug mode on flask application `app`"
- text: "getting the length of `my_tuple`"
- text: 'find all files in directory "/mydir" with extension ".txt"'
---

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/mariancg-a-code-generation-transformer-model/code-generation-on-conala)](https://paperswithcode.com/sota/code-generation-on-conala?p=mariancg-a-code-generation-transformer-model)

# MarianCG: a code generation transformer model inspired by machine translation

This model addresses the code generation problem with a transformer model that produces highly accurate results. MarianCG is a code generation model that generates code from natural language. This work demonstrates the impact of using the Marian machine translation model for solving the code generation problem; in our implementation, we show that a machine translation model can operate as a code generation model. Finally, we set a new state of the art on CoNaLa, reaching a BLEU score of 30.92 and an exact match accuracy of 6.2 on the code generation problem with the CoNaLa dataset.

The MarianCG model, its implementation, the training code, and the generated output are available at this repository: https://github.com/AhmedSSoliman/MarianCG-NL-to-Code

The CoNaLa dataset for code generation is available at https://huggingface.co/datasets/AhmedSSoliman/CoNaLa-Large

The model is available on the Hugging Face Hub: https://huggingface.co/AhmedSSoliman/MarianCG-CoNaLa-Large

```python
# Model and Tokenizer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# model_name = "AhmedSSoliman/MarianCG-NL-to-Code"
model = AutoModelForSeq2SeqLM.from_pretrained("AhmedSSoliman/MarianCG-CoNaLa-Large")
tokenizer = AutoTokenizer.from_pretrained("AhmedSSoliman/MarianCG-CoNaLa-Large")

# Input (natural language) and output (Python code)
NL_input = "create array containing the maximum value of respective elements of array `[2, 3, 4]` and array `[1, 5, 2]`"
output = model.generate(**tokenizer(NL_input, padding="max_length", truncation=True, max_length=512, return_tensors="pt"))
output_code = tokenizer.decode(output[0], skip_special_tokens=True)
```

This model is available in Spaces using Gradio at: https://huggingface.co/spaces/AhmedSSoliman/MarianCG-CoNaLa-Large

---

Tasks:
- Translation
- Code Generation
- Text2Text Generation
- Text Generation

---

# Citation

We now have a [paper](https://doi.org/10.1186/s44147-022-00159-4) for this work and you can cite:

```
@article{soliman2022mariancg,
  title={MarianCG: a code generation transformer model inspired by machine translation},
  author={Soliman, Ahmed S and Hadhoud, Mayada M and Shaheen, Samir I},
  journal={Journal of Engineering and Applied Science},
  volume={69},
  number={1},
  pages={1--23},
  year={2022},
  publisher={SpringerOpen},
  url={https://doi.org/10.1186/s44147-022-00159-4}
}
```
3,216
[ [ -0.016754150390625, -0.047882080078125, -0.0029811859130859375, 0.03570556640625, -0.0260467529296875, -0.0061492919921875, -0.0168914794921875, -0.0304718017578125, 0.0014963150024414062, 0.024566650390625, -0.040924072265625, -0.036346435546875, -0.06451416015...
facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur
2022-10-13T21:28:24.000Z
[ "fairseq", "audio", "text-to-speech", "en", "dataset:mtedx", "dataset:covost2", "dataset:europarl_st", "dataset:voxpopuli", "arxiv:2204.02967", "license:cc-by-nc-4.0", "has_space", "region:us" ]
text-to-speech
facebook
null
null
facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur
17
673
fairseq
2022-08-31T21:50:19
---
license: cc-by-nc-4.0
library_name: fairseq
task: text-to-speech
tags:
- fairseq
- audio
- text-to-speech
language: en
datasets:
- mtedx
- covost2
- europarl_st
- voxpopuli
widget:
- example_title: Common Voice sample 1
  src: https://huggingface.co/facebook/xm_transformer_600m-es_en-multi_domain/resolve/main/common_voice_es_19966634.flac
---

## unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur

Unit HiFi-GAN vocoder for the speech-to-speech translation pipeline from fairseq S2UT ([paper](https://arxiv.org/abs/2204.02967)/[code](https://github.com/facebookresearch/fairseq/blob/main/examples/speech_to_speech/docs/enhanced_direct_s2st_discrete_units.md)):

- Spanish-English
- Trained on mTEDx, CoVoST 2, Europarl-ST and VoxPopuli

## Usage

The snippet below first runs the S2UT model to predict discrete units from Spanish speech, then synthesizes English audio from those units with this vocoder:

```python
import json
import os
from pathlib import Path

import IPython.display as ipd
import torchaudio
from fairseq import hub_utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.text_to_speech import CodeHiFiGANVocoder
from fairseq.models.text_to_speech.hub_interface import VocoderHubInterface
from huggingface_hub import snapshot_download

cache_dir = os.getenv("HUGGINGFACE_HUB_CACHE")

# Speech-to-unit translation: predict discrete units from the source audio
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/xm_transformer_s2ut_800m-es-en-st-asr-bt_h1_2022",
    arg_overrides={"config_yaml": "config.yaml", "task": "speech_to_text"},
    cache_dir=cache_dir,
)
model = models[0].cpu()
cfg["task"].cpu = True
generator = task.build_generator([model], cfg)

# requires 16000Hz mono channel audio
audio, _ = torchaudio.load("/Users/lpw/git/api-inference-community/docker_images/fairseq/tests/samples/sample2.flac")
sample = S2THubInterface.get_model_input(task, audio)
unit = S2THubInterface.get_prediction(task, model, generator, sample)

# Speech synthesis: vocode the predicted units with this checkpoint
library_name = "fairseq"
cache_dir = cache_dir or (Path.home() / ".cache" / library_name).as_posix()
cache_dir = snapshot_download(
    "facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur",
    cache_dir=cache_dir,
    library_name=library_name,
)

x = hub_utils.from_pretrained(
    cache_dir,
    "model.pt",
    ".",
    archive_map=CodeHiFiGANVocoder.hub_models(),
    config_yaml="config.json",
    fp16=False,
    is_vocoder=True,
)

with open(f"{x['args']['data']}/config.json") as f:
    vocoder_cfg = json.load(f)
assert len(x["args"]["model_path"]) == 1, "Too many vocoder models in the input"

vocoder = CodeHiFiGANVocoder(x["args"]["model_path"][0], vocoder_cfg)
tts_model = VocoderHubInterface(vocoder_cfg, vocoder)

tts_sample = tts_model.get_model_input(unit)
wav, sr = tts_model.get_prediction(tts_sample)
ipd.Audio(wav, rate=sr)
```
2,792
[ [ -0.032928466796875, -0.04736328125, 0.0172271728515625, 0.0215911865234375, -0.005218505859375, -0.0147552490234375, -0.020172119140625, -0.01462554931640625, -0.01268768310546875, 0.03118896484375, -0.052947998046875, -0.038055419921875, -0.04248046875, -0....
timm/convnext_large_mlp.laion2b_ft_augreg_inat21
2023-10-25T20:22:52.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2303.11331", "arxiv:2304.14108", "arxiv:2212.07143", "license:cc-by-nc-4.0", "region:us" ]
image-classification
timm
null
null
timm/convnext_large_mlp.laion2b_ft_augreg_inat21
0
672
timm
2023-07-27T21:50:22
---
tags:
- image-classification
- timm
library_name: timm
license: cc-by-nc-4.0
---

# Model card for convnext_large_mlp.laion2b_ft_augreg_inat21

Part of a series of `timm` fine-tune experiments on iNaturalist 2021 competition data (https://github.com/visipedia/inat_comp/tree/master/2021) for higher capacity models.

Covering 10,000 species, this dataset and these models are fun to explore via the classification widget with pictures from your backyard, but they are quite a bit smaller than the models you can find on the iNaturalist website (https://www.inaturalist.org/blog/75633-a-new-computer-vision-model-v2-1-including-1-770-new-taxa).

No extra metadata was used for training these models (as was the case for the competition); it was a straightforward fine-tune to explore differences in model pretrain data.

| Model | Top-1 | Top-5 | Img Size (Train) | Paper |
|-------|-------|-------|----------|-------|
| [eva02_large_patch14_clip_336.merged2b_ft_inat21](https://huggingface.co/timm/eva02_large_patch14_clip_336.merged2b_ft_inat21) | 92.05 | 98.01 | 336 | https://arxiv.org/abs/2303.11331 |
| [vit_large_patch14_clip_336.datacompxl_ft_augreg_inat21](https://huggingface.co/timm/vit_large_patch14_clip_336.datacompxl_ft_augreg_inat21) | 91.98 | 98.03 | 336 | https://arxiv.org/abs/2304.14108 |
| [vit_large_patch14_clip_336.laion2b_ft_augreg_inat21](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_augreg_inat21) | 91.48 | 97.89 | 336 | https://arxiv.org/abs/2212.07143 |
| [convnext_large_mlp.laion2b_ft_augreg_inat21](https://huggingface.co/timm/convnext_large_mlp.laion2b_ft_augreg_inat21) | 90.95 | 97.68 | 448 (384) | |
| [vit_large_patch14_clip_336.datacompxl_ft_inat21](https://huggingface.co/timm/vit_large_patch14_clip_336.datacompxl_ft_inat21) | 90.85 | 97.68 | 336 | https://arxiv.org/abs/2304.14108 |
| [convnext_large_mlp.laion2b_ft_augreg_inat21](https://huggingface.co/timm/convnext_large_mlp.laion2b_ft_augreg_inat21) | 90.62 | 97.61 | 384 | |
| [vit_large_patch14_clip_336.laion2b_ft_in12k_in1k_inat21](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k_inat21) | 90.29 | 97.44 | 336 | https://arxiv.org/abs/2212.07143 |

## Run Validation

```
python validate.py /tfds/ --dataset tfds/i_naturalist2021 --model hf-hub:timm/convnext_large_mlp.laion2b_ft_augreg_inat21 --split val --amp
```

## Citation

```bibtex
@inproceedings{cherti2023reproducible,
  title={Reproducible scaling laws for contrastive language-image learning},
  author={Cherti, Mehdi and Beaumont, Romain and Wightman, Ross and Wortsman, Mitchell and Ilharco, Gabriel and Gordon, Cade and Schuhmann, Christoph and Schmidt, Ludwig and Jitsev, Jenia},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={2818--2829},
  year={2023}
}
```
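For completeness, a minimal inference sketch with `timm` (an assumption-based example, not from the original card; the image path is a placeholder, and mapping the top-k indices to species names would additionally require the iNat21 label map):

```python
import timm
import torch
from PIL import Image

# Load the fine-tuned classifier directly from the Hub.
model = timm.create_model(
    "hf-hub:timm/convnext_large_mlp.laion2b_ft_augreg_inat21", pretrained=True
)
model.eval()

# Build the eval transform that matches the model's pretrained config.
config = timm.data.resolve_data_config({}, model=model)
transform = timm.data.create_transform(**config)

img = Image.open("backyard_photo.jpg").convert("RGB")  # placeholder image path
with torch.no_grad():
    probs = model(transform(img).unsqueeze(0)).softmax(dim=-1)
top5_prob, top5_idx = probs.topk(5)  # indices into the 10,000 iNat21 classes
print(top5_idx, top5_prob)
```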
2,821
[ [ -0.053558349609375, -0.042022705078125, 0.001682281494140625, 0.032257080078125, -0.030975341796875, -0.0239105224609375, -0.0199737548828125, -0.041595458984375, 0.041259765625, 0.0205535888671875, -0.04400634765625, -0.034271240234375, -0.0369873046875, 0....
unikei/bert-base-smiles
2023-09-28T15:40:03.000Z
[ "transformers", "safetensors", "bert", "pretraining", "fill-mask", "dataset:ChEMBL", "license:bigscience-openrail-m", "endpoints_compatible", "region:us" ]
fill-mask
unikei
null
null
unikei/bert-base-smiles
0
672
transformers
2023-09-14T10:23:48
---
license: bigscience-openrail-m
widget:
- text: O=C([C@@H](c1ccc(cc1)O)N)[MASK][C@@H]1C(=O)N2[C@@H]1SC([C@@H]2C(=O)O)(C)C
datasets:
- ChEMBL
pipeline_tag: fill-mask
---

# BERT base for SMILES

This is a bidirectional transformer pretrained on SMILES (simplified molecular-input line-entry system) strings.

Example: Amoxicillin
```
O=C([C@@H](c1ccc(cc1)O)N)N[C@@H]1C(=O)N2[C@@H]1SC([C@@H]2C(=O)O)(C)C
```

Two training objectives were used:
1. masked language modeling
2. molecular-formula validity prediction

## Intended uses

This model is primarily aimed at being fine-tuned on the following tasks:
- molecule classification
- molecule-to-gene-expression mapping
- cell targeting

## How to use in your code

```python
from transformers import BertTokenizerFast, BertModel

checkpoint = 'unikei/bert-base-smiles'
tokenizer = BertTokenizerFast.from_pretrained(checkpoint)
model = BertModel.from_pretrained(checkpoint)

example = 'O=C([C@@H](c1ccc(cc1)O)N)N[C@@H]1C(=O)N2[C@@H]1SC([C@@H]2C(=O)O)(C)C'
tokens = tokenizer(example, return_tensors='pt')
predictions = model(**tokens)
```
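A quick way to probe the masked-language-modeling objective is the `fill-mask` pipeline, mirroring the widget example above (a sketch; it assumes the hosted weights expose a standard BERT MLM head, as the card's `fill-mask` pipeline tag suggests):

```python
from transformers import pipeline

# The [MASK] token replaces one piece of the amoxicillin SMILES string.
fill = pipeline("fill-mask", model="unikei/bert-base-smiles")
predictions = fill(
    "O=C([C@@H](c1ccc(cc1)O)N)[MASK][C@@H]1C(=O)N2[C@@H]1SC([C@@H]2C(=O)O)(C)C"
)
for p in predictions:
    print(p["token_str"], round(p["score"], 4))
```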
1,086
[ [ -0.00803375244140625, -0.0428466796875, 0.0307159423828125, 0.01018524169921875, -0.0242919921875, 0.00261688232421875, -0.018585205078125, 0.0009374618530273438, 0.01438140869140625, 0.0169525146484375, -0.0738525390625, -0.040069580078125, -0.05706787109375, ...
cross-encoder/ms-marco-TinyBERT-L-4
2021-08-05T08:39:59.000Z
[ "transformers", "pytorch", "jax", "bert", "text-classification", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
cross-encoder
null
null
cross-encoder/ms-marco-TinyBERT-L-4
1
671
transformers
2022-03-02T23:29:05
---
license: apache-2.0
---

# Cross-Encoder for MS Marco

This model was trained on the [MS Marco Passage Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking) task.

The model can be used for Information Retrieval: given a query, score the query against all candidate passages (e.g. retrieved with ElasticSearch), then sort the passages in decreasing order of score. See [SBERT.net Retrieve & Re-rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html) for more details. The training code is available here: [SBERT.net Training MS Marco](https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/ms_marco)

## Usage with Transformers

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model = AutoModelForSequenceClassification.from_pretrained('cross-encoder/ms-marco-TinyBERT-L-4')
tokenizer = AutoTokenizer.from_pretrained('cross-encoder/ms-marco-TinyBERT-L-4')

features = tokenizer(['How many people live in Berlin?', 'How many people live in Berlin?'],
                     ['Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.', 'New York City is famous for the Metropolitan Museum of Art.'],
                     padding=True, truncation=True, return_tensors="pt")

model.eval()
with torch.no_grad():
    scores = model(**features).logits
    print(scores)
```

## Usage with SentenceTransformers

Usage becomes easier when you have [SentenceTransformers](https://www.sbert.net/) installed. Then you can use the pre-trained model like this:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('cross-encoder/ms-marco-TinyBERT-L-4', max_length=512)
scores = model.predict([('Query', 'Paragraph1'), ('Query', 'Paragraph2'), ('Query', 'Paragraph3')])
```

## Performance

In the following table, we provide various pre-trained Cross-Encoders together with their performance on the [TREC Deep Learning 2019](https://microsoft.github.io/TREC-2019-Deep-Learning/) and the [MS Marco Passage Reranking](https://github.com/microsoft/MSMARCO-Passage-Ranking/) dataset.

| Model-Name | NDCG@10 (TREC DL 19) | MRR@10 (MS Marco Dev) | Docs / Sec |
| ------------- |:-------------:| -----:| ---:|
| **Version 2 models** | | | |
| cross-encoder/ms-marco-TinyBERT-L-2-v2 | 69.84 | 32.56 | 9000 |
| cross-encoder/ms-marco-MiniLM-L-2-v2 | 71.01 | 34.85 | 4100 |
| cross-encoder/ms-marco-MiniLM-L-4-v2 | 73.04 | 37.70 | 2500 |
| cross-encoder/ms-marco-MiniLM-L-6-v2 | 74.30 | 39.01 | 1800 |
| cross-encoder/ms-marco-MiniLM-L-12-v2 | 74.31 | 39.02 | 960 |
| **Version 1 models** | | | |
| cross-encoder/ms-marco-TinyBERT-L-2 | 67.43 | 30.15 | 9000 |
| cross-encoder/ms-marco-TinyBERT-L-4 | 68.09 | 34.50 | 2900 |
| cross-encoder/ms-marco-TinyBERT-L-6 | 69.57 | 36.13 | 680 |
| cross-encoder/ms-marco-electra-base | 71.99 | 36.41 | 340 |
| **Other models** | | | |
| nboost/pt-tinybert-msmarco | 63.63 | 28.80 | 2900 |
| nboost/pt-bert-base-uncased-msmarco | 70.94 | 34.75 | 340 |
| nboost/pt-bert-large-msmarco | 73.36 | 36.48 | 100 |
| Capreolus/electra-base-msmarco | 71.23 | 36.89 | 340 |
| amberoad/bert-multilingual-passage-reranking-msmarco | 68.40 | 35.54 | 330 |
| sebastian-hofstaetter/distilbert-cat-margin_mse-T2-msmarco | 72.82 | 37.88 | 720 |

Note: Runtime was computed on a V100 GPU.
3,233
[ [ -0.03228759765625, -0.043670654296875, 0.0250396728515625, 0.01168060302734375, -0.0127105712890625, 0.01073455810546875, -0.01338958740234375, -0.038543701171875, 0.025146484375, 0.0255889892578125, -0.041229248046875, -0.051055908203125, -0.058013916015625, ...
google/bert_uncased_L-12_H-128_A-2
2021-05-19T17:26:01.000Z
[ "transformers", "pytorch", "jax", "bert", "arxiv:1908.08962", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
google
null
null
google/bert_uncased_L-12_H-128_A-2
0
670
transformers
2022-03-02T23:29:05
---
thumbnail: https://huggingface.co/front/thumbnails/google.png
license: apache-2.0
---

BERT Miniatures
===

This is the set of 24 BERT models referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962) (English only, uncased, trained with WordPiece masking).

We have shown that the standard BERT recipe (including model architecture and training objective) is effective on a wide range of model sizes, beyond BERT-Base and BERT-Large. The smaller BERT models are intended for environments with restricted computational resources. They can be fine-tuned in the same manner as the original BERT models. However, they are most effective in the context of knowledge distillation, where the fine-tuning labels are produced by a larger and more accurate teacher.

Our goal is to enable research in institutions with fewer computational resources and encourage the community to seek directions of innovation alternative to increasing model capacity.

You can download the 24 BERT miniatures either from the [official BERT Github page](https://github.com/google-research/bert/), or via HuggingFace from the links below:

| |H=128|H=256|H=512|H=768|
|---|:---:|:---:|:---:|:---:|
| **L=2** |[**2/128 (BERT-Tiny)**][2_128]|[2/256][2_256]|[2/512][2_512]|[2/768][2_768]|
| **L=4** |[4/128][4_128]|[**4/256 (BERT-Mini)**][4_256]|[**4/512 (BERT-Small)**][4_512]|[4/768][4_768]|
| **L=6** |[6/128][6_128]|[6/256][6_256]|[6/512][6_512]|[6/768][6_768]|
| **L=8** |[8/128][8_128]|[8/256][8_256]|[**8/512 (BERT-Medium)**][8_512]|[8/768][8_768]|
| **L=10** |[10/128][10_128]|[10/256][10_256]|[10/512][10_512]|[10/768][10_768]|
| **L=12** |[12/128][12_128]|[12/256][12_256]|[12/512][12_512]|[**12/768 (BERT-Base)**][12_768]|

Note that the BERT-Base model in this release is included for completeness only; it was re-trained under the same regime as the original model.

Here are the corresponding GLUE scores on the test set:

|Model|Score|CoLA|SST-2|MRPC|STS-B|QQP|MNLI-m|MNLI-mm|QNLI(v2)|RTE|WNLI|AX|
|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|BERT-Tiny|64.2|0.0|83.2|81.1/71.1|74.3/73.6|62.2/83.4|70.2|70.3|81.5|57.2|62.3|21.0|
|BERT-Mini|65.8|0.0|85.9|81.1/71.8|75.4/73.3|66.4/86.2|74.8|74.3|84.1|57.9|62.3|26.1|
|BERT-Small|71.2|27.8|89.7|83.4/76.2|78.8/77.0|68.1/87.0|77.6|77.0|86.4|61.8|62.3|28.6|
|BERT-Medium|73.5|38.0|89.6|86.6/81.6|80.4/78.4|69.6/87.9|80.0|79.1|87.7|62.2|62.3|30.5|

For each task, we selected the best fine-tuning hyperparameters from the lists below, and trained for 4 epochs:
- batch sizes: 8, 16, 32, 64, 128
- learning rates: 3e-4, 1e-4, 5e-5, 3e-5

If you use these models, please cite the following paper:

```
@article{turc2019,
  title={Well-Read Students Learn Better: On the Importance of Pre-training Compact Models},
  author={Turc, Iulia and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
  journal={arXiv preprint arXiv:1908.08962v2},
  year={2019}
}
```

[2_128]: https://huggingface.co/google/bert_uncased_L-2_H-128_A-2
[2_256]: https://huggingface.co/google/bert_uncased_L-2_H-256_A-4
[2_512]: https://huggingface.co/google/bert_uncased_L-2_H-512_A-8
[2_768]: https://huggingface.co/google/bert_uncased_L-2_H-768_A-12
[4_128]: https://huggingface.co/google/bert_uncased_L-4_H-128_A-2
[4_256]: https://huggingface.co/google/bert_uncased_L-4_H-256_A-4
[4_512]: https://huggingface.co/google/bert_uncased_L-4_H-512_A-8
[4_768]: https://huggingface.co/google/bert_uncased_L-4_H-768_A-12
[6_128]: https://huggingface.co/google/bert_uncased_L-6_H-128_A-2
[6_256]: https://huggingface.co/google/bert_uncased_L-6_H-256_A-4
[6_512]: https://huggingface.co/google/bert_uncased_L-6_H-512_A-8
[6_768]: https://huggingface.co/google/bert_uncased_L-6_H-768_A-12
[8_128]: https://huggingface.co/google/bert_uncased_L-8_H-128_A-2
[8_256]: https://huggingface.co/google/bert_uncased_L-8_H-256_A-4
[8_512]: https://huggingface.co/google/bert_uncased_L-8_H-512_A-8
[8_768]: https://huggingface.co/google/bert_uncased_L-8_H-768_A-12
[10_128]: https://huggingface.co/google/bert_uncased_L-10_H-128_A-2
[10_256]: https://huggingface.co/google/bert_uncased_L-10_H-256_A-4
[10_512]: https://huggingface.co/google/bert_uncased_L-10_H-512_A-8
[10_768]: https://huggingface.co/google/bert_uncased_L-10_H-768_A-12
[12_128]: https://huggingface.co/google/bert_uncased_L-12_H-128_A-2
[12_256]: https://huggingface.co/google/bert_uncased_L-12_H-256_A-4
[12_512]: https://huggingface.co/google/bert_uncased_L-12_H-512_A-8
[12_768]: https://huggingface.co/google/bert_uncased_L-12_H-768_A-12
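A minimal loading sketch for the 12/128 variant this card is attached to (a sketch assuming only the standard `transformers` auto classes; the input sentence is illustrative):

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/bert_uncased_L-12_H-128_A-2")
model = AutoModel.from_pretrained("google/bert_uncased_L-12_H-128_A-2")

inputs = tokenizer("Compact BERT models fine-tune quickly.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 128) since H=128
```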
4,617
[ [ -0.053558349609375, -0.03546142578125, 0.02392578125, 0.0131683349609375, -0.02374267578125, -0.016937255859375, -0.0239715576171875, -0.031219482421875, 0.04376220703125, -0.0060882568359375, -0.06103515625, -0.030670166015625, -0.05206298828125, -0.0019273...
potsawee/longformer-large-4096-answerable-squad2
2023-03-17T00:06:57.000Z
[ "transformers", "pytorch", "longformer", "text-classification", "en", "dataset:squad_v2", "arxiv:2303.08896", "license:apache-2.0", "region:us" ]
text-classification
potsawee
null
null
potsawee/longformer-large-4096-answerable-squad2
3
670
transformers
2023-03-16T23:42:45
---
license: apache-2.0
datasets:
- squad_v2
language:
- en
library_name: transformers
pipeline_tag: text-classification
inference: false
---

# longformer-large-4096 fine-tuned on SQuAD2.0 for answerability scoring

This model determines whether a question is answerable (or unanswerable) given the context. The output is a probability, where values close to 0.0 indicate that the question is unanswerable and values close to 1.0 mean it is answerable.

- Input: `question` and `context`
- Output: `probability` (i.e. logit -> sigmoid)

## Model Details

The longformer-large-4096 model is fine-tuned on the SQuAD2.0 dataset, where the input is a concatenation of ```question + context```. Due to class imbalance in SQuAD2.0, we resample such that the model is trained on a 50/50 split between answerable and unanswerable samples in SQuAD2.0.

## How to Use the Model

Use the code below to get started with the model.

```python
>> import torch
>> from transformers import LongformerTokenizer, LongformerForSequenceClassification

>> tokenizer = LongformerTokenizer.from_pretrained("potsawee/longformer-large-4096-answerable-squad2")
>> model = LongformerForSequenceClassification.from_pretrained("potsawee/longformer-large-4096-answerable-squad2")

>> context = """
British government ministers have been banned from using Chinese-owned social media app TikTok on their work phones and devices on security grounds.
The government fears sensitive data held on official phones could be accessed by the Chinese government.
Cabinet Minister Oliver Dowden said the ban was a "precautionary" move but would come into effect immediately.
""".replace("\n", " ").strip()

>> question1 = "Which application have been banned by the British government?"
>> input_text1 = question1 + ' ' + tokenizer.sep_token + ' ' + context
>> inputs1 = tokenizer(input_text1, max_length=4096, truncation=True, return_tensors="pt")
>> prob1 = torch.sigmoid(model(**inputs1).logits.squeeze(-1))
>> print("P(answerable|question1, context) = {:.2f}%".format(prob1.item()*100))
P(answerable|question1, context) = 99.21%  # highly answerable

>> question2 = "Is Facebook popular among young students in America?"
>> input_text2 = question2 + ' ' + tokenizer.sep_token + ' ' + context
>> inputs2 = tokenizer(input_text2, max_length=4096, truncation=True, return_tensors="pt")
>> prob2 = torch.sigmoid(model(**inputs2).logits.squeeze(-1))
>> print("P(answerable|question2, context) = {:.2f}%".format(prob2.item()*100))
P(answerable|question2, context) = 2.53%  # highly unanswerable
```

## Citation

```bibtex
@misc{manakul2023selfcheckgpt,
      title={SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models},
      author={Potsawee Manakul and Adian Liusie and Mark J. F. Gales},
      year={2023},
      eprint={2303.08896},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
2,918
[ [ -0.023895263671875, -0.06640625, 0.0269927978515625, 0.0252838134765625, -0.01320648193359375, -0.0038776397705078125, -0.018524169921875, -0.029449462890625, 0.0175933837890625, 0.032440185546875, -0.03863525390625, -0.027679443359375, -0.0301361083984375, ...
digiplay/BrickAndMortarMix_v2.0_diffusers
2023-07-15T08:35:28.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
digiplay
null
null
digiplay/BrickAndMortarMix_v2.0_diffusers
4
670
diffusers
2023-06-17T09:57:37
---
license: other
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
inference: true
---

Model info (please apply a VAE):

https://civitai.com/models/83867/brickandmortarmix

Original author's demo image:

![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0dd8782d-c16b-4798-9759-274b3f1fceee/width=1024/02.jpeg)

Sample image I made (with diffusers, I recommend a low guidance scale of about 3 for better results):

![](https://cdn-uploads.huggingface.co/production/uploads/646c83c871d0c8a6e4455854/jwRDgMIUhMDeJAolCQBkN.png)
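A minimal `diffusers` sketch following the notes above (the specific external VAE is an assumption — the card only says to apply one, and `stabilityai/sd-vae-ft-mse` is a common choice; the prompt is illustrative):

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

# Assumed VAE choice; the card just says to apply a VAE.
vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16
)
pipe = StableDiffusionPipeline.from_pretrained(
    "digiplay/BrickAndMortarMix_v2.0_diffusers", vae=vae, torch_dtype=torch.float16
).to("cuda")

# Low guidance scale (~3), as recommended in the card for diffusers.
image = pipe("a weathered brick storefront at dusk", guidance_scale=3.0).images[0]
image.save("brickandmortar_sample.png")
```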
565
[ [ -0.032257080078125, -0.048095703125, 0.020050048828125, 0.045318603515625, -0.0072479248046875, -0.0284881591796875, 0.0193023681640625, 0.006618499755859375, 0.00897979736328125, 0.0299072265625, -0.0189056396484375, -0.018798828125, -0.0172882080078125, -0...
stablediffusionapi/cosmic-babes
2023-05-15T00:48:19.000Z
[ "diffusers", "stablediffusionapi.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
stablediffusionapi
null
null
stablediffusionapi/cosmic-babes
1
669
diffusers
2023-05-15T00:27:10
---
license: creativeml-openrail-m
tags:
- stablediffusionapi.com
- stable-diffusion-api
- text-to-image
- ultra-realistic
pinned: true
---

# Cosmic Babes API Inference

![generated from stablediffusionapi.com](https://cdn.stablediffusionapi.com/generations/18919208401684110544.png)

## Get API Key

Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed.

Replace Key in below code, change **model_id** to "cosmic-babes"

Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs)

Model link: [View model](https://stablediffusionapi.com/models/cosmic-babes)

Credits: [View credits](https://civitai.com/?query=Cosmic%20Babes)

View all models: [View Models](https://stablediffusionapi.com/models)

```python
import requests
import json

url = "https://stablediffusionapi.com/api/v3/dreambooth"

payload = json.dumps({
    "key": "",
    "model_id": "cosmic-babes",
    "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera",
    "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
    "width": "512",
    "height": "512",
    "samples": "1",
    "num_inference_steps": "30",
    "safety_checker": "no",
    "enhance_prompt": "yes",
    "seed": None,
    "guidance_scale": 7.5,
    "multi_lingual": "no",
    "panorama": "no",
    "self_attention": "no",
    "upscale": "no",
    "embeddings": "embeddings_model_id",
    "lora": "lora_model_id",
    "webhook": None,
    "track_id": None
})

headers = {
    'Content-Type': 'application/json'
}

response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
```

> Use this coupon code to get 25% off **DMGG0RBN**
2,399
[ [ -0.030517578125, -0.0716552734375, 0.031646728515625, 0.029693603515625, -0.0269775390625, 0.0066375732421875, 0.0202484130859375, -0.03240966796875, 0.0401611328125, 0.046142578125, -0.06591796875, -0.066162109375, -0.02459716796875, 0.000010788440704345703...
eugenesiow/drln
2021-09-20T01:00:50.000Z
[ "transformers", "DRLN", "super-image", "image-super-resolution", "dataset:eugenesiow/Div2k", "dataset:eugenesiow/Set5", "dataset:eugenesiow/Set14", "dataset:eugenesiow/BSD100", "dataset:eugenesiow/Urban100", "arxiv:1906.12021", "arxiv:2104.07566", "license:apache-2.0", "endpoints_compatible"...
null
eugenesiow
null
null
eugenesiow/drln
2
668
transformers
2022-03-02T23:29:05
---
license: apache-2.0
tags:
- super-image
- image-super-resolution
datasets:
- eugenesiow/Div2k
- eugenesiow/Set5
- eugenesiow/Set14
- eugenesiow/BSD100
- eugenesiow/Urban100
metrics:
- psnr
- ssim
---

# Densely Residual Laplacian Super-Resolution (DRLN)

DRLN model pre-trained on DIV2K (800 images training, augmented to 4000 images, 100 images validation) for 2x, 3x and 4x image super resolution. It was introduced in the paper [Densely Residual Laplacian Super-resolution](https://arxiv.org/abs/1906.12021) by Anwar et al. (2020) and first released in [this repository](https://github.com/saeed-anwar/DRLN).

The goal of image super resolution is to restore a high resolution (HR) image from a single low resolution (LR) image. The image below shows the ground truth (HR), the bicubic upscaling and model upscaling.

![Comparing Bicubic upscaling against the models x4 upscaling on Set5 Image 4](images/drln_4_4_compare.png "Comparing Bicubic upscaling against the models x4 upscaling on Set5 Image 4")

## Model description

Super-Resolution convolutional neural networks have recently demonstrated high-quality restoration for single images. However, existing algorithms often require very deep architectures and long training times. Furthermore, current convolutional neural networks for super-resolution are unable to exploit features at multiple scales and weigh them equally, limiting their learning capability. In this exposition, we present a compact and accurate super-resolution algorithm, namely the Densely Residual Laplacian Network (DRLN). The proposed network employs cascading residual on the residual structure to allow the flow of low-frequency information to focus on learning high and mid-level features. In addition, deep supervision is achieved via the densely concatenated residual blocks settings, which also helps in learning from high-level complex features. Moreover, we propose Laplacian attention to model the crucial features to learn the inter and intra-level dependencies between the feature maps. Furthermore, comprehensive quantitative and qualitative evaluations on low-resolution, noisy low-resolution, and real historical image benchmark datasets illustrate that our DRLN algorithm performs favorably against the state-of-the-art methods visually and accurately.

## Intended uses & limitations

You can use the pre-trained models for upscaling your images 2x, 3x and 4x. You can also use the trainer to train a model on your own dataset.

### How to use

The model can be used with the [super_image](https://github.com/eugenesiow/super-image) library:

```bash
pip install super-image
```

Here is how to use a pre-trained model to upscale your image:

```python
from super_image import DrlnModel, ImageLoader
from PIL import Image
import requests

url = 'https://paperswithcode.com/media/datasets/Set5-0000002728-07a9793f_zA3bDjj.jpg'
image = Image.open(requests.get(url, stream=True).raw)

model = DrlnModel.from_pretrained('eugenesiow/drln', scale=2)  # scale 2, 3 and 4 models available
inputs = ImageLoader.load_image(image)
preds = model(inputs)

ImageLoader.save_image(preds, './scaled_2x.png')  # save the output 2x scaled image to `./scaled_2x.png`
ImageLoader.save_compare(inputs, preds, './scaled_2x_compare.png')  # save an output comparing the super-image with a bicubic scaling
```

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/eugenesiow/super-image-notebooks/blob/master/notebooks/Upscale_Images_with_Pretrained_super_image_Models.ipynb "Open in Colab")

## Training data

The models for 2x, 3x and 4x image super resolution were pretrained on [DIV2K](https://huggingface.co/datasets/eugenesiow/Div2k), a dataset of 800 high-quality (2K resolution) images for training, augmented to 4000 images, with a dev set of 100 validation images (images numbered 801 to 900).

## Training procedure

### Preprocessing

We follow the pre-processing and training method of [Wang et al.](https://arxiv.org/abs/2104.07566). Low Resolution (LR) images are created by using bicubic interpolation as the resizing method to reduce the size of the High Resolution (HR) images by x2, x3 and x4 times. During training, RGB patches with size of 64×64 from the LR input are used together with their corresponding HR patches. Data augmentation is applied to the training set in the pre-processing stage, where five images are created from the four corners and center of the original image.

We need the huggingface [datasets](https://huggingface.co/datasets?filter=task_ids:other-other-image-super-resolution) library to download the data:

```bash
pip install datasets
```

The following code gets the data and preprocesses/augments the data.

```python
from datasets import load_dataset
from super_image.data import EvalDataset, TrainDataset, augment_five_crop

augmented_dataset = load_dataset('eugenesiow/Div2k', 'bicubic_x4', split='train')\
    .map(augment_five_crop, batched=True, desc="Augmenting Dataset")                            # download and augment the data with the five_crop method
train_dataset = TrainDataset(augmented_dataset)                                                 # prepare the train dataset for loading PyTorch DataLoader
eval_dataset = EvalDataset(load_dataset('eugenesiow/Div2k', 'bicubic_x4', split='validation'))  # prepare the eval dataset for the PyTorch DataLoader
```

### Pretraining

The model was trained on GPU. The training code is provided below:

```python
from super_image import Trainer, TrainingArguments, DrlnModel, DrlnConfig

training_args = TrainingArguments(
    output_dir='./results',        # output directory
    num_train_epochs=1000,         # total number of training epochs
)

config = DrlnConfig(
    scale=4,                       # train a model to upscale 4x
)
model = DrlnModel(config)

trainer = Trainer(
    model=model,                   # the instantiated model to be trained
    args=training_args,            # training arguments, defined above
    train_dataset=train_dataset,   # training dataset
    eval_dataset=eval_dataset      # evaluation dataset
)

trainer.train()
```

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/eugenesiow/super-image-notebooks/blob/master/notebooks/Train_super_image_Models.ipynb "Open in Colab")

## Evaluation results

The evaluation metrics include [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Quality_estimation_with_PSNR) and [SSIM](https://en.wikipedia.org/wiki/Structural_similarity#Algorithm).

Evaluation datasets include:
- Set5 - [Bevilacqua et al. (2012)](https://huggingface.co/datasets/eugenesiow/Set5)
- Set14 - [Zeyde et al. (2010)](https://huggingface.co/datasets/eugenesiow/Set14)
- BSD100 - [Martin et al. (2001)](https://huggingface.co/datasets/eugenesiow/BSD100)
- Urban100 - [Huang et al. (2015)](https://huggingface.co/datasets/eugenesiow/Urban100)

The results columns below are represented as `PSNR/SSIM`. They are compared against a Bicubic baseline.

|Dataset  |Scale |Bicubic      |drln             |
|---      |---   |---          |---              |
|Set5     |2x    |33.64/0.9292 |**38.22/0.9614** |
|Set5     |3x    |30.39/0.8678 |**35.31/0.9423** |
|Set5     |4x    |28.42/0.8101 |**32.55/0.899**  |
|Set14    |2x    |30.22/0.8683 |**34.01/0.9211** |
|Set14    |3x    |27.53/0.7737 |**31.21/0.8619** |
|Set14    |4x    |25.99/0.7023 |**28.96/0.7901** |
|BSD100   |2x    |29.55/0.8425 |**33.93/0.9269** |
|BSD100   |3x    |27.20/0.7382 |**29.77/0.8223** |
|BSD100   |4x    |25.96/0.6672 |**28.65/0.7692** |
|Urban100 |2x    |26.66/0.8408 |**32.82/0.934**  |
|Urban100 |3x    |             |**29.79/0.8825** |
|Urban100 |4x    |23.14/0.6573 |**26.56/0.7998** |

![Comparing Bicubic upscaling against the models x4 upscaling on Set5 Image 2](images/drln_2_4_compare.png "Comparing Bicubic upscaling against the models x4 upscaling on Set5 Image 2")

You can find a notebook to easily run evaluation on pretrained models below:

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/eugenesiow/super-image-notebooks/blob/master/notebooks/Evaluate_Pretrained_super_image_Models.ipynb "Open in Colab")

## BibTeX entry and citation info

```bibtex
@misc{anwar2019densely,
    title={Densely Residual Laplacian Super-Resolution},
    author={Saeed Anwar and Nick Barnes},
    year={2019},
    eprint={1906.12021},
    archivePrefix={arXiv},
    primaryClass={eess.IV}
}
```
8,955
[ [ -0.044219970703125, -0.0357666015625, -0.0007228851318359375, 0.0013799667358398438, -0.0203857421875, 0.00000864267349243164, -0.0021457672119140625, -0.0305023193359375, 0.0147857666015625, 0.01314544677734375, -0.03314208984375, -0.0189666748046875, -0.039672...
VietnamAIHub/Vietnamese_LLama2_13B_8K_SFT_General_Domain_Knowledge
2023-09-09T12:25:05.000Z
[ "transformers", "pytorch", "llama", "text-generation", "custom_code", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
VietnamAIHub
null
null
VietnamAIHub/Vietnamese_LLama2_13B_8K_SFT_General_Domain_Knowledge
3
668
transformers
2023-08-29T03:20:47
# Vietnamese Llama2-13B 8K Context Length with LoRA Adapters

This repository contains a Llama-13B model fine-tuned with QLoRA (Quantization Low-Rank Adapter) adapters. The adapter is a plug-and-play tool that enables the LLaMa model to perform well on many Vietnamese NLP tasks.

Project GitHub page: [Github](https://github.com/VietnamAIHub/Vietnamese_LLMs)

## Model Overview

The Vietnamese Llama2-13B model is a large language model capable of generating meaningful text and can be used in a wide variety of natural language processing tasks, including text generation, sentiment analysis, and more. By using LoRA adapters, the model achieves better performance on low-resource tasks and demonstrates improved generalization.

## Dataset and Fine-Tuning

The LLaMa2 model was fine-tuned on over 200K Vietnamese instructions from various sources to improve its ability to understand and generate text for different tasks.

Dataset link: Coming soon

## Testing the Model Yourself

To load the fine-tuned Llama-13B model with LoRA adapters, follow the code snippet below:

```python
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "VietnamAIHub/Vietnamese_LLama2_13B_8K_SFT_General_Domain_Knowledge"

# Load the model in 8-bit together with its tokenizer
m = AutoModelForCausalLM.from_pretrained(
    model_name,
    load_in_8bit=True,
    torch_dtype=torch.bfloat16,
    pretraining_tp=1,
)
tok = AutoTokenizer.from_pretrained(
    model_name,
    padding_side="right",
    use_fast=False,           # fast tokenizer giving issues
    tokenizer_type="llama",   # needed for HF name change
)
tok.bos_token_id = 1

stop_token_ids = [0]

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_id in stop_token_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

stop = StopOnTokens()

# "Cách để học tập về một môn học thật tốt" = "How to study a subject well"
prompts_input = "Cách để học tập về một môn học thật tốt"

message = f"""<s>[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please respond that, as a language model, you are not able to answer this kind of question in detail.
<</SYS>>

{prompts_input} [/INST] """

generation_config = dict(
    temperature=0.1,
    top_k=30,
    top_p=0.95,
    do_sample=True,
    repetition_penalty=1.2,
    max_new_tokens=2048,    # 8K context
    early_stopping=True,
    stopping_criteria=StoppingCriteriaList([stop]),
)

inputs = tok(message, return_tensors="pt")
generation_output = m.generate(
    input_ids=inputs["input_ids"].to(device),
    attention_mask=inputs["attention_mask"].to(device),
    eos_token_id=tok.eos_token_id,
    pad_token_id=tok.pad_token_id,
    **generation_config,
)

output = tok.decode(generation_output[0], skip_special_tokens=True)
print(output)
```

## Conclusion

The Vietnamese Llama2-13B model with LoRA adapters is a versatile language model that can be utilized for a wide range of NLP tasks in Vietnamese. We hope that researchers and developers find this model useful and are encouraged to experiment with it in their projects.

For any questions, feedback, or contributions, please feel free to contact the maintainers of this repository, TranNhiem 🙌: [Linkedin](https://www.linkedin.com/in/tran-nhiem-ab1851125/) [Twitter](https://twitter.com/TranRick2) [Facebook](https://www.facebook.com/jean.tran.336), Project [Discord](https://discord.gg/MC3yDZNz).

Happy fine-tuning and experimenting with the Llama2-13B model!
5,009
[ [ -0.01617431640625, -0.07061767578125, 0.0294036865234375, 0.031402587890625, -0.0262298583984375, -0.012908935546875, -0.0239410400390625, -0.0293731689453125, 0.00962066650390625, 0.027801513671875, -0.032958984375, -0.047882080078125, -0.04998779296875, 0....
Salesforce/codegen-6B-mono
2022-10-03T16:18:48.000Z
[ "transformers", "pytorch", "codegen", "text-generation", "arxiv:2203.13474", "license:bsd-3-clause", "endpoints_compatible", "has_space", "region:us" ]
text-generation
Salesforce
null
null
Salesforce/codegen-6B-mono
35
667
transformers
2022-04-13T00:51:11
---
license: bsd-3-clause
---

# CodeGen (CodeGen-Mono 6B)

## Model description

CodeGen is a family of autoregressive language models for **program synthesis** from the paper: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. The models were originally released in [this repository](https://github.com/salesforce/CodeGen), under 3 pre-training data variants (`NL`, `Multi`, `Mono`) and 4 model size variants (`350M`, `2B`, `6B`, `16B`).

The checkpoint included in this repository is denoted as **CodeGen-Mono 6B** in the paper, where "Mono" means the model is initialized with *CodeGen-Multi 6B* and further pre-trained on a Python programming language dataset, and "6B" refers to the number of trainable parameters.

## Training data

This checkpoint (CodeGen-Mono 6B) was first initialized with *CodeGen-Multi 6B*, and then pre-trained on the BigPython dataset. The data consists of 71.7B tokens of the Python programming language. See Section 2.1 of the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Training procedure

CodeGen was trained using cross-entropy loss to maximize the likelihood of sequential inputs. The family of models was trained using multiple TPU-v4-512 by Google, leveraging data and model parallelism. See Section 2.3 of the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Evaluation results

We evaluate our models on two code generation benchmarks: HumanEval and MTPB. Please refer to the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Intended Use and Limitations

As an autoregressive language model, CodeGen is capable of extracting features from given natural language and programming language texts and calculating their likelihood. However, the model is intended for and best at **program synthesis**, that is, generating executable code given English prompts, where the prompts should be in the form of a comment string. The model can complete partially-generated code as well.

## How to use

This model can be easily loaded using the `AutoModelForCausalLM` functionality:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-6B-mono")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-6B-mono")

text = "def hello_world():"
input_ids = tokenizer(text, return_tensors="pt").input_ids

generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```

## BibTeX entry and citation info

```bibtex
@article{Nijkamp2022ACP,
  title={A Conversational Paradigm for Program Synthesis},
  author={Nijkamp, Erik and Pang, Bo and Hayashi, Hiroaki and Tu, Lifu and Wang, Huan and Zhou, Yingbo and Savarese, Silvio and Xiong, Caiming},
  journal={arXiv preprint},
  year={2022}
}
```
2,963
[ [ -0.03759765625, -0.047393798828125, -0.0015192031860351562, 0.021484375, -0.0021305084228515625, 0.0207977294921875, -0.0257110595703125, -0.03192138671875, -0.0023174285888671875, 0.016357421875, -0.04107666015625, -0.040618896484375, -0.0300750732421875, 0...
Salesforce/codegen-16B-multi
2022-10-03T16:18:49.000Z
[ "transformers", "pytorch", "codegen", "text-generation", "arxiv:2203.13474", "license:bsd-3-clause", "endpoints_compatible", "has_space", "region:us" ]
text-generation
Salesforce
null
null
Salesforce/codegen-16B-multi
114
667
transformers
2022-04-13T00:52:09
---
license: bsd-3-clause
---

# CodeGen (CodeGen-Multi 16B)

## Model description

CodeGen is a family of autoregressive language models for **program synthesis** from the paper: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. The models were originally released in [this repository](https://github.com/salesforce/CodeGen), under 3 pre-training data variants (`NL`, `Multi`, `Mono`) and 4 model size variants (`350M`, `2B`, `6B`, `16B`).

The checkpoint included in this repository is denoted as **CodeGen-Multi 16B** in the paper, where "Multi" means the model is initialized with *CodeGen-NL 16B* and further pre-trained on a dataset of multiple programming languages, and "16B" refers to the number of trainable parameters.

## Training data

This checkpoint (CodeGen-Multi 16B) was first initialized with *CodeGen-NL 16B*, and then pre-trained on [BigQuery](https://console.cloud.google.com/marketplace/details/github/github-repos), a large-scale dataset of multiple programming languages from GitHub repositories. The data consists of 119.2B tokens and includes C, C++, Go, Java, JavaScript, and Python.

## Training procedure

CodeGen was trained using cross-entropy loss to maximize the likelihood of sequential inputs. The family of models was trained using multiple TPU-v4-512 by Google, leveraging data and model parallelism. See Section 2.3 of the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Evaluation results

We evaluate our models on two code generation benchmarks: HumanEval and MTPB. Please refer to the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Intended Use and Limitations

As an autoregressive language model, CodeGen is capable of extracting features from given natural language and programming language texts and calculating their likelihood. However, the model is intended for and best at **program synthesis**, that is, generating executable code given English prompts, where the prompts should be in the form of a comment string. The model can complete partially-generated code as well.

## How to use

This model can be easily loaded using the `AutoModelForCausalLM` functionality:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-multi")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-16B-multi")

text = "def hello_world():"
input_ids = tokenizer(text, return_tensors="pt").input_ids

generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```

## BibTeX entry and citation info

```bibtex
@article{Nijkamp2022ACP,
  title={A Conversational Paradigm for Program Synthesis},
  author={Nijkamp, Erik and Pang, Bo and Hayashi, Hiroaki and Tu, Lifu and Wang, Huan and Zhou, Yingbo and Savarese, Silvio and Xiong, Caiming},
  journal={arXiv preprint},
  year={2022}
}
```
3,067
[ [ -0.038543701171875, -0.04693603515625, 0.0033245086669921875, 0.0271759033203125, 0.0055694580078125, 0.027984619140625, -0.0295257568359375, -0.029205322265625, -0.012298583984375, 0.0209808349609375, -0.04425048828125, -0.044952392578125, -0.0288543701171875, ...
stanfordnlp/SteamSHP-flan-t5-large
2023-10-10T23:56:37.000Z
[ "transformers", "pytorch", "t5", "text2text-generation", "human feedback", "rlhf", "preferences", "reddit", "preference model", "RL", "NLG", "evaluation", "en", "dataset:stanfordnlp/SHP", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-gen...
text2text-generation
stanfordnlp
null
null
stanfordnlp/SteamSHP-flan-t5-large
29
667
transformers
2023-02-21T00:42:04
---
license: apache-2.0
datasets:
- stanfordnlp/SHP
language:
- en
metrics:
- accuracy
tags:
- human feedback
- rlhf
- preferences
- reddit
- preference model
- RL
- NLG
- evaluation
---

# 💨🚢 SteamSHP-Large

<!-- Provide a quick summary of what the model is/does. -->

**If you mention this model, please cite the paper:** [Understanding Dataset Difficulty with V-Usable Information (ICML 2022)](https://proceedings.mlr.press/v162/ethayarajh22a.html).

SteamSHP-Large is a preference model trained to predict -- given some context and two possible responses -- which response humans will find more helpful. It can be used for NLG evaluation or as a reward model for RLHF.

It is a FLAN-T5-large model (780M parameters) finetuned on:
1. The [Stanford Human Preferences Dataset (SHP)](https://huggingface.co/datasets/stanfordnlp/SHP), which contains collective human preferences sourced from 18 different communities on Reddit (e.g., `askculinary`, `legaladvice`, etc.).
2. The helpfulness data in [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset.

There is a larger variant called [SteamSHP-XL](https://huggingface.co/stanfordnlp/SteamSHP-flan-t5-xl) that was made by finetuning FLAN-T5-xl (3B parameters).

## Usage

### Normal Usage

The input text should be of the format:

```
POST: { the context, such as the 'history' column in SHP (not containing any newlines \n) }

RESPONSE A: { first possible continuation (not containing any newlines \n) }

RESPONSE B: { second possible continuation (not containing any newlines \n) }

Which response is better? RESPONSE
```

The output generated by SteamSHP-Large will either be `A` or `B`.

Here's how to use the model:

```python
>> from transformers import T5ForConditionalGeneration, T5Tokenizer
>> device = 'cuda'  # if you have a GPU

>> tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
>> model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)

>> input_text = "POST: Instacart gave me 50 pounds of limes instead of 5 pounds... what the hell do I do with 50 pounds of limes? I've already donated a bunch and gave a bunch away. I'm planning on making a bunch of lime-themed cocktails, but... jeez. Ceviche? \n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE"
>> x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
>> y = model.generate(x, max_new_tokens=1)
>> tokenizer.batch_decode(y, skip_special_tokens=True)
['B']
```

If the input exceeds the 512 token limit, you can use [pySBD](https://github.com/nipunsadvilkar/pySBD) to break the input up into sentences and only include what fits into 512 tokens. When trying to cram an example into 512 tokens, we recommend truncating the context as much as possible and leaving the responses as untouched as possible.

### Reward Model Usage

If you want to use SteamSHP-Large as a reward model -- to get a score for a single response -- then you need to structure the input such that RESPONSE A is what you want to score and RESPONSE B is just a null input (a single period):

```
POST: { the context, such as the 'history' column in SHP (not containing any newlines \n) }

RESPONSE A: { continuation (not containing any newlines \n) }

RESPONSE B: .

Which response is better? RESPONSE
```

Then calculate the probability assigned to the label A. This probability (or the logit, depending on what you want) is the score for the response:

```python
>> import torch
>> input_text = "POST: Instacart gave me 50 pounds of limes instead of 5 pounds... what the hell do I do with 50 pounds of limes? I've already donated a bunch and gave a bunch away. I'm planning on making a bunch of lime-themed cocktails, but... jeez. Ceviche? \n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: .\n\n Which response is better? RESPONSE"
>> x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
>> outputs = model.generate(x, return_dict_in_generate=True, output_scores=True, max_new_tokens=1)
>> torch.exp(outputs.scores[0][:, 71]) / torch.exp(outputs.scores[0][:,:]).sum(axis=1).item()  # index 71 corresponds to the token for 'A'
0.8617
```

The probability will almost always be high (in the range of 0.8 to 1.0), since RESPONSE B is just a null input. Therefore you may want to normalize the probability.

You can also compare the two probabilities assigned independently to each response (given the same context) to infer the preference label. For example, if one response has probability 0.95 and the other has 0.80, the former will be preferred. Inferring the preference label in this way only leads to a 0.005 drop in accuracy on the SHP + HH-RLHF test data on average across all domains, meaning that there is only a very small penalty for using SteamSHP as a reward model instead of as a preference model.

## Training and Evaluation

SteamSHP-Large was only finetuned on 125K of the 392K training examples that were available, since we found that:
1. When the total input length exceeded the limit (512 tokens), the loss would not converge. When possible, we crammed an example to fit under 500 tokens by truncating the context as much as possible, though some examples would still not fit despite this. We used 500 as the limit instead of 512 to allow for slight modifications to the structure of the input without any examples exceeding the actual 512 limit.
2. Training on fewer preferences with a stronger signal led to better performance than training on all the preferences. From the SHP dataset, we only used preferences where the more preferred comment was twice as preferred as the other (i.e., `score_ratio` >= 2) and used no more than 5 preferences from each context (i.e., 5 examples per unique `post_id`) to prevent overfitting. We did no such subsampling for the HH-RLHF training data.

We evaluated the model on the SHP and HH-RLHF test data using accuracy, but only on the data that could be truncated to fit within 500 tokens (a total of 18621 out of 20753 available test examples). SteamSHP-Large gets an average 72.0% accuracy across all domains:

| Domain | Accuracy |
| ------ | -------- |
| askculinary | 0.7199 |
| askhr | 0.7507 |
| askdocs | 0.6920 |
| askanthropology | 0.7925 |
| asksciencefiction | 0.7266 |
| askacademia | 0.7442 |
| askengineers | 0.7146 |
| legaladvice | 0.7958 |
| explainlikeimfive | 0.7312 |
| askbaking | 0.6656 |
| askphysics | 0.7888 |
| askscience | 0.6926 |
| askphilosophy | 0.6837 |
| askvet | 0.7696 |
| changemyview | 0.6984 |
| askcarguys | 0.7297 |
| askhistorians | 0.7476 |
| asksocialscience | 0.8231 |
| anthropic (helpfulness) | 0.7310 |
| ALL (unweighted) | 0.7203 |

As mentioned previously, you can also use SteamSHP as a reward model and infer the preference label from the probabilities assigned to each response independently; doing so costs only a 0.005 drop in accuracy on the test data (on average across all domains).

## Biases and Limitations

SteamSHP is trained to predict which of two responses humans will find *more helpful*, not which response is *less harmful*. It should not be used to detect toxicity, make ethical judgments, or for a similar purpose.

Biases and misinformation in the datasets used to train SteamSHP may also be propagated downstream to the model predictions. Although SHP filtered out posts with NSFW (over 18) content and chose subreddits that were well-moderated and had policies against harassment and bigotry, some of the data may still contain discriminatory or harmful language. The responses that humans collectively found more helpful are also not guaranteed to be more factual.

The people whose preferences are captured in SHP and HH-RLHF are not representative of the broader population. Although specific demographic information is not available, overall, the Reddit users whose preferences are captured in SHP are disproportionately male and from developed, Western, and English-speaking countries (Pew Research).

[Past work](https://www.anthropic.com/model-written-evals.pdf) by Anthropic has found that models optimized for human preference can be obsequious, at the expense of the truth.

## Contact

Please contact kawin@stanford.edu if you have any questions about the model. This model was created by Kawin Ethayarajh, Heidi (Chenyu) Zhang, Yizhong Wang, and Dan Jurafsky.

## Citation

SHP was created using the techniques proposed in the following paper. Please cite this work if you use SHP or the SteamSHP models:

```
@InProceedings{pmlr-v162-ethayarajh22a,
  title = {Understanding Dataset Difficulty with $\mathcal{V}$-Usable Information},
  author = {Ethayarajh, Kawin and Choi, Yejin and Swayamdipta, Swabha},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  pages = {5988--6008},
  year = {2022},
  editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  volume = {162},
  series = {Proceedings of Machine Learning Research},
  month = {17--23 Jul},
  publisher = {PMLR},
}
```
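For completeness, here is a minimal end-to-end sketch of the reward-model recipe described above: score two responses independently, then compare the scores to infer the preference label. The token index 71 for `A` comes from the example earlier in this card; the helper name and the shortened post text are illustrative.

```python
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)

def score_response(post: str, response: str) -> float:
    """Score one response by pairing it against a null RESPONSE B, per the recipe above."""
    input_text = (f"POST: {post}\n\n RESPONSE A: {response}\n\n "
                  "RESPONSE B: .\n\n Which response is better? RESPONSE")
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    outputs = model.generate(x, return_dict_in_generate=True, output_scores=True, max_new_tokens=1)
    probs = torch.softmax(outputs.scores[0], dim=-1)
    return probs[0, 71].item()  # index 71 is the token for 'A' (see the card's example)

post = "Instacart gave me 50 pounds of limes instead of 5 pounds... what do I do with them?"
score_a = score_response(post, "Lime juice, and zest, then freeze in small quantities.")
score_b = score_response(post, "Lime marmalade lol")
print("preferred:", "A" if score_a >= score_b else "B")
```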
9,257
[ [ -0.0189971923828125, -0.056610107421875, 0.0213165283203125, 0.0064544677734375, -0.019622802734375, -0.0223388671875, -0.0236663818359375, -0.03680419921875, 0.031280517578125, 0.0283660888671875, -0.03961181640625, -0.03546142578125, -0.04669189453125, -0....
stablediffusionapi/waifu-journey-2
2023-05-19T05:02:25.000Z
[ "diffusers", "stablediffusionapi.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
stablediffusionapi
null
null
stablediffusionapi/waifu-journey-2
1
667
diffusers
2023-05-08T18:41:47
---
license: creativeml-openrail-m
tags:
- stablediffusionapi.com
- stable-diffusion-api
- text-to-image
- ultra-realistic
pinned: true
---

# Waifu Journey 2 API Inference

![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/9509517861683571238.png)

## Get API Key

Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/), no payment needed. Replace the key in the code below and set **model_id** to "waifu-journey-2".

Coding in PHP/Node/Java etc? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs)

Model link: [View model](https://stablediffusionapi.com/models/waifu-journey-2)

Credits: [View credits](https://civitai.com/?query=Waifu%20Journey%202)

View all models: [View Models](https://stablediffusionapi.com/models)

```python
import requests
import json

url = "https://stablediffusionapi.com/api/v3/dreambooth"

payload = json.dumps({
    "key": "",
    "model_id": "waifu-journey-2",
    "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera",
    "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
    "width": "512",
    "height": "512",
    "samples": "1",
    "num_inference_steps": "30",
    "safety_checker": "no",
    "enhance_prompt": "yes",
    "seed": None,
    "guidance_scale": 7.5,
    "multi_lingual": "no",
    "panorama": "no",
    "self_attention": "no",
    "upscale": "no",
    "embeddings": "embeddings_model_id",
    "lora": "lora_model_id",
    "webhook": None,
    "track_id": None
})

headers = {
    'Content-Type': 'application/json'
}

response = requests.request("POST", url, headers=headers, data=payload)

print(response.text)
```

> Use this coupon code to get 25% off **DMGG0RBN**
2,432
[ [ -0.0201263427734375, -0.059906005859375, 0.042877197265625, 0.03350830078125, -0.02099609375, 0.0009851455688476562, 0.03167724609375, -0.032440185546875, 0.029937744140625, 0.052825927734375, -0.076416015625, -0.04913330078125, -0.03070068359375, -0.0037288...
gooohjy/suicidal-electra
2022-03-30T12:18:23.000Z
[ "transformers", "pytorch", "electra", "text-classification", "endpoints_compatible", "region:us" ]
text-classification
gooohjy
null
null
gooohjy/suicidal-electra
3
666
transformers
2022-03-02T23:29:05
# Suicidal-ELECTRA

This text classification model predicts whether a sequence of words is suicidal (1) or non-suicidal (0).

## Data

The model was trained on the [Suicide and Depression Dataset](https://www.kaggle.com/nikhileswarkomati/suicide-watch) obtained from Kaggle. The dataset was scraped from Reddit and consists of 232,074 rows equally distributed between 2 classes - suicide and non-suicide.

## Parameters

The model was fine-tuned for 1 epoch with a batch size of 6 and a learning rate of 0.00001. Due to limited computing resources and time, we were unable to scale up the number of epochs and the batch size.

## Performance

The model has achieved the following results after fine-tuning on the aforementioned dataset:
- Accuracy: 0.9792
- Recall: 0.9788
- Precision: 0.9677
- F1 Score: 0.9732

## How to Use

Load the model via the transformers library:

```
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("gooohjy/suicidal-electra")
model = AutoModel.from_pretrained("gooohjy/suicidal-electra")
```

## Resources

For more resources, including the source code, please refer to the GitHub repository [gohjiayi/suicidal-text-detection](https://github.com/gohjiayi/suicidal-text-detection/).
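The snippet above loads the bare encoder. To actually obtain the suicidal (1) / non-suicidal (0) prediction the card describes, the checkpoint needs to be loaded with its sequence-classification head. A minimal sketch, assuming the checkpoint ships classifier weights (its `text-classification` pipeline tag suggests it does); the input sentence is illustrative:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("gooohjy/suicidal-electra")
model = AutoModelForSequenceClassification.from_pretrained("gooohjy/suicidal-electra")

inputs = tokenizer("I feel like nothing matters anymore", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()  # 1 = suicidal, 0 = non-suicidal, per the card
print(pred)
```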
1,251
[ [ -0.0156402587890625, -0.051239013671875, 0.0287017822265625, 0.0175628662109375, -0.0118255615234375, -0.0090789794921875, -0.0033702850341796875, -0.00939178466796875, -0.0084991455078125, 0.0238189697265625, -0.043792724609375, -0.0489501953125, -0.05853271484...
marefa-nlp/summarization-arabic-english-news
2021-07-13T13:06:31.000Z
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
marefa-nlp
null
null
marefa-nlp/summarization-arabic-english-news
2
666
transformers
2022-03-02T23:29:05
------------

## Arabic and English News Summarization NLP Model

### About

This model summarizes news stories into a set of short highlights, for both Arabic and English.

نموذج معرفي متخصص في تلخيص الأخبار العربية و الإنجليزية الى مجموعة من أهم النقاط

### Fine-Tuning

The model was finetuned from the [Arabic T5 Model](https://huggingface.co/bakrianoo/t5-arabic-large), which was developed by [Abu Bakr Soliman](http://github.com/bakrianoo). The primary summarization model was also developed by the same developer.

### How to Use

- You can use this [Colab Notebook](https://colab.research.google.com/drive/1DWND1CAfCXD4OxrfmLBEaKeXhjGmYkod?usp=sharing) to test the model

1. Install [PyTorch](https://pytorch.org/)
2. Install the following Python packages

`$ pip3 install transformers==4.7.0 nltk==3.5 protobuf==3.15.3 sentencepiece==0.1.96`

3. Run this code

```python
from transformers import AutoTokenizer, AutoModelWithLMHead
import torch

import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

m_name = "marefa-nlp/summarization-arabic-english-news"

tokenizer = AutoTokenizer.from_pretrained(m_name)
model = AutoModelWithLMHead.from_pretrained(m_name).to(device)

def get_summary(text, tokenizer, model, device="cpu", num_beams=2):
    if len(text.strip()) < 50:
        return ["Please provide a longer text"]

    text = "summarize: <paragraph> " + " <paragraph> ".join([ s.strip() for s in sent_tokenize(text) if s.strip() != ""]) + " </s>"
    text = text.strip().replace("\n","")

    tokenized_text = tokenizer.encode(text, return_tensors="pt").to(device)

    summary_ids = model.generate(
        tokenized_text,
        max_length=512,
        num_beams=num_beams,
        repetition_penalty=1.5,
        length_penalty=1.0,
        early_stopping=True
    )

    output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return [ s.strip() for s in output.split("<hl>") if s.strip() != "" ]

## Prepare Samples
samples = [
    """
    قال المدافع الإيطالي ليوناردو بونوتشي إن منتخب بلاده ليس خائفا من مواجهة نظيره الانجليزي على أرضه في المباراة النهائية في بطولة يورو 2020 لكرة القدم، في حين وصف المدافع الانجليزي جون ستونز المباراة المرتقبة بأنها ستكون "أكثر تميزا". وسوف تقام المباراة في استاد ويمبلي، شمال غربي لندن، يوم الأحد. وتسعى إيطاليا لإحراز اللقب الأوروبي للمرة الثانية بعد فوزها به أول مرة عام 1968. ولم يفز الفريق الانجليزي بهذا اللقب القاري من قبل. والبطولة الرئيسية الوحيدة التي فازت بها انجلترا هي كأس العالم عام 1966 الذي أقيمت مباراته النهائية في استاد ويمبلي.
    """,
    """
    On a night fraught with tension, Italy clinched its first major title for 15 years with a penalty shootout win over England in the Euro 2020 final. Luke Shaw's goal inside the opening two minutes gave England a lead it looked like it would hold onto all night, before a goalmouth scramble midway through the second half allowed Leonardo Bonucci to poke home an equalizer for Italy. For the remainder of the match, it felt as though extra-time and penalties were inevitable, as neither side seemed willing or brave enough to commit enough men forward to really trouble the opposing defenders. England had suffered innumerable heartbreaks on penalties over the years and this time it was Italy's turn to inflict yet more pain on beleaguered English fans as Marcus Rashford, Jadon Sancho and Bukayo Saka all missed from the spot.
    """,
]

## Get summaries
print("Original Article:", samples[0])
print("\n===========\nSummary: \n")
hls = get_summary(samples[0], tokenizer, model, device)
for hl in hls:
    print("\t-", hl)

print("Original Article:", samples[1])
print("\n=========== \nSummary: \n")
hls = get_summary(samples[1], tokenizer, model, device)
for hl in hls:
    print("\t-", hl)
```

Results

```
Original Article: قال المدافع الإيطالي ليوناردو بونوتشي إن منتخب بلاده ليس خائفا من مواجهة نظيره الانجليزي على أرضه في المباراة النهائية في بطولة يورو 2020 لكرة القدم، في حين وصف المدافع الانجليزي جون ستونز المباراة المرتقبة بأنها ستكون "أكثر تميزا". وسوف تقام المباراة في استاد ويمبلي، شمال غربي لندن، يوم الأحد. وتسعى إيطاليا لإحراز اللقب الأوروبي للمرة الثانية بعد فوزها به أول مرة عام 1968. ولم يفز الفريق الانجليزي بهذا اللقب القاري من قبل. والبطولة الرئيسية الوحيدة التي فازت بها انجلترا هي كأس العالم عام 1966 الذي أقيمت مباراته النهائية في استاد ويمبلي.

===========
Summary: 

	- وسوف تواجه إيطاليا إنجلترا في بطولة يورو 2020 لكرة القدم يوم الأحد.
	- ستقام المباراة في استاد ويمبلي، شمال غربي لندن، يوم الأحد.
	- ولم يفز الفريق الانجليزي بهذا اللقب القاري قبل.
```

```
Original Article: On a night fraught with tension, Italy clinched its first major title for 15 years with a penalty shootout win over England in the Euro 2020 final. Luke Shaw's goal inside the opening two minutes gave England a lead it looked like it would hold onto all night, before a goalmouth scramble midway through the second half allowed Leonardo Bonucci to poke home an equalizer for Italy. For the remainder of the match it felt as though extra-time and penalties were inevitable, as neither side seemed willing or brave enough to commit enough men forward to really trouble the opposing defenders. England had suffered innumerable heartbreaks on penalties over the years and this time it was Italy's turn to inflict yet more pain on beleaguered English fans as Marcus Rashford, Jadon Sancho and Bukayo Saka all missed from the spot.

=========== 
Summary: 

	- Luke Shaw's goal gave England a lead it looked like it would hold onto all night.
	- Leonardo Bonucci scored the equalizer for Italy.
	- Marcus Rashford, Jadon Sancho and Bukayo Saka all missed.
```
5,889
[ [ -0.04400634765625, -0.0369873046875, 0.03948974609375, 0.0254058837890625, -0.0202789306640625, -0.01397705078125, 0.00487518310546875, -0.02587890625, 0.034881591796875, -0.0018892288208007812, -0.0364990234375, -0.04656982421875, -0.0433349609375, 0.033996...
thennal/whisper-medium-ml
2023-09-17T10:28:17.000Z
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ml", "dataset:mozilla-foundation/common_voice_11_0", "dataset:google/fleurs", "dataset:thennal/IMaSC", "dataset:thennal/ulca_ml", "dataset:thennal/msc", "dataset...
automatic-speech-recognition
thennal
null
null
thennal/whisper-medium-ml
3
666
transformers
2022-12-12T19:56:15
---
language:
- ml
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
- google/fleurs
- thennal/IMaSC
- thennal/ulca_ml
- thennal/msc
- thennal/indic_tts_ml
metrics:
- wer
base_model: openai/whisper-medium
model-index:
- name: Whisper Medium Malayalam - Thennal D K
  results:
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: Common Voice 11.0
      type: mozilla-foundation/common_voice_11_0
      config: ml
      split: test
      args: ml
    metrics:
    - type: wer
      value: 11.49
      name: WER
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Medium Malayalam

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set:
- WER: 38.6207
- CER: 7.3256

Note that Whisper's normalization has major issues for languages like Malayalam, so the above scores are evaluated without using normalization. With normalization (for a fair comparison with other models on this platform), the results are instead:
- WER: 11.49

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 8000
- mixed_precision_training: Native AMP

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
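The card stops short of a usage snippet. Here is a minimal transcription sketch using the standard `transformers` ASR pipeline; the audio file name is a placeholder, and on recent `transformers` versions the language and task can be forced through `generate_kwargs` (older versions need `forced_decoder_ids` instead):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="thennal/whisper-medium-ml")

# "sample_malayalam.wav" is a placeholder path; any local audio file works
result = asr("sample_malayalam.wav", generate_kwargs={"language": "ml", "task": "transcribe"})
print(result["text"])
```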
2,003
[ [ -0.019775390625, -0.042633056640625, -0.0023193359375, 0.0435791015625, -0.041015625, -0.034393310546875, -0.031890869140625, -0.03131103515625, 0.0283050537109375, 0.0228271484375, -0.03839111328125, -0.043060302734375, -0.05645751953125, -0.008125305175781...
CiroN2022/hair-style
2023-08-25T10:07:12.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "license:other", "has_space", "region:us" ]
text-to-image
CiroN2022
null
null
CiroN2022/hair-style
0
666
diffusers
2023-08-25T10:07:08
---
license: other
tags:
- text-to-image
- stable-diffusion
- lora
- diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt:
widget:
- text:
---

# Hair Style

![Image 0](2193827.jpeg)

## Image examples for the model:

![Image 1](2193816.jpeg)
![Image 2](2193814.jpeg)
![Image 3](2193815.jpeg)
![Image 4](2193812.jpeg)
![Image 5](2193813.jpeg)
![Image 6](2193818.jpeg)
![Image 7](2193820.jpeg)
![Image 8](2193823.jpeg)
![Image 9](2193821.jpeg)
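Since the card only lists sample images, here is a minimal diffusers sketch for applying the LoRA on top of its SDXL base model. The prompt is illustrative, and the sketch assumes the repository contains LoRA weights loadable via `load_lora_weights`:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("CiroN2022/hair-style")  # assumes LoRA weights at the repo root

image = pipe("portrait photo, elaborate braided hair style", num_inference_steps=30).images[0]
image.save("hair_style.png")
```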
488
[ [ -0.039215087890625, -0.036956787109375, 0.0308990478515625, 0.0460205078125, -0.015655517578125, -0.0118255615234375, 0.0258636474609375, 0.004856109619140625, 0.039764404296875, 0.038299560546875, -0.066162109375, -0.044921875, -0.028076171875, 0.0077629089...
KappaNeuro/dressed-animals
2023-09-14T09:33:54.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "dress", "animal", "license:other", "region:us", "has_space" ]
text-to-image
KappaNeuro
null
null
KappaNeuro/dressed-animals
1
666
diffusers
2023-09-14T09:33:49
---
license: other
tags:
- text-to-image
- stable-diffusion
- lora
- diffusers
- dress
- animal
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: Dressed animals page
widget:
- text: "Dressed animals - iguana having a great fashion sense"
- text: "Dressed animals - A mix of animal and human that walks through the streets of New York and looks cool"
- text: "Dressed animals - [Anthropomorphic Hairy Red panda Bear with retro glasses and dot pijamas"
- text: "Dressed animals - a crocodile in a cool brand new clothing, minimalistic 3d cartoon style"
- text: "Dressed animals - full body portrait of A Hippo animal Mafia in a mafia circus color suit with a top hat in the style of a Marionette. Happiness. exaggerated. Advertising. Photography studio background. Studio lighting. 8k. Cinematographic."
- text: "Dressed animals - elegant anteater, High - fashion, poster - like, Astronaut modeling a sophisticated gown"
- text: "Dressed animals - Anthropomorphic capybara as as a 1980s fashion icon on the new york fashion show"
- text: "Dressed animals - A man wearing an elaborate stylish monkey costume, human face monkey costume,"
- text: "Dressed animals - im not actually a person im just a leopard gekko pretending to be a person"
- text: "Dressed animals - Street style photo of animal like half horse half tiger on Kodak Gold 200"
---

# Dressed animals ([CivitAI](https://civitai.com/models/153739))

![Image 0](2322178.jpeg)
> Dressed animals - iguana having a great fashion sense

<p>Dressed animals refer to animals that have been adorned or outfitted with clothing or accessories for various purposes, including entertainment, cultural traditions, or artistic expression. This practice has a long history and can be found in different cultures around the world.</p>
<p>In some cases, dressing animals is done for practical reasons, such as protecting them from harsh weather conditions or providing them with specialized gear for specific tasks, such as working animals or therapy animals. For example, horses may be dressed in saddles and bridles for riding, or guide dogs may wear special harnesses.</p>
<p>However, there is also a long tradition of dressing animals for decorative or symbolic purposes. This can be seen in various cultural practices, such as costumes worn by performing animals in circuses or costumes used in traditional ceremonies and festivals involving animals.</p>
<p>Dressing animals can also be seen in artistic and creative contexts. Artists may create whimsical or fantastical scenes by dressing animals in elaborate costumes or outfits, blurring the boundaries between reality and imagination. This can be seen in various forms of visual art, such as paintings, illustrations, or sculptures.</p>
<p>While dressing animals can sometimes raise ethical considerations, particularly when it involves discomfort or harm to the animals, in many cases it is done with care and respect for the well-being of the animals involved. In artistic and creative contexts, dressing animals can serve as a form of expression, adding a touch of whimsy, humor, or beauty to the depiction of animals in art.</p>
<p>Overall, dressed animals reflect the diverse ways in which humans interact with and represent the animal kingdom, blending practicality, cultural significance, and artistic expression.</p>

## Image examples for the model:

![Image 1](2322097.jpeg)
> Dressed animals - A mix of animal and human that walks through the streets of New York and looks cool

![Image 2](2322132.jpeg)
> Dressed animals - [Anthropomorphic Hairy Red panda Bear with retro glasses and dot pijamas

![Image 3](2322142.jpeg)
> Dressed animals - a crocodile in a cool brand new clothing, minimalistic 3d cartoon style

![Image 4](2322079.jpeg)
> Dressed animals - full body portrait of A Hippo animal Mafia in a mafia circus color suit with a top hat in the style of a Marionette. Happiness. exaggerated. Advertising. Photography studio background. Studio lighting. 8k. Cinematographic.

![Image 5](2322082.jpeg)
> Dressed animals - elegant anteater, High - fashion, poster - like, Astronaut modeling a sophisticated gown

![Image 6](2322085.jpeg)
> Dressed animals - Anthropomorphic capybara as as a 1980s fashion icon on the new york fashion show

![Image 7](2322088.jpeg)
> Dressed animals - A man wearing an elaborate stylish monkey costume, human face monkey costume,

![Image 8](2322122.jpeg)
> Dressed animals - im not actually a person im just a leopard gekko pretending to be a person

![Image 9](2322137.jpeg)
> Dressed animals - Street style photo of animal like half horse half tiger on Kodak Gold 200
4,642
[ [ -0.059326171875, -0.0309295654296875, -0.0225677490234375, 0.059051513671875, -0.046356201171875, 0.01059722900390625, -0.0021190643310546875, -0.0823974609375, 0.06500244140625, 0.032623291015625, -0.03717041015625, -0.0213470458984375, -0.01297760009765625, ...
LeoLM/leo-mistral-hessianai-7b
2023-10-09T16:21:03.000Z
[ "transformers", "pytorch", "mistral", "text-generation", "en", "de", "dataset:oscar-corpus/OSCAR-2301", "dataset:wikipedia", "dataset:bjoernp/tagesschau-2018-2023", "license:apache-2.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
LeoLM
null
null
LeoLM/leo-mistral-hessianai-7b
3
666
transformers
2023-10-04T07:30:48
---
datasets:
- oscar-corpus/OSCAR-2301
- wikipedia
- bjoernp/tagesschau-2018-2023
language:
- en
- de
library_name: transformers
pipeline_tag: text-generation
license: apache-2.0
---

# LAION LeoLM: **L**inguistically **E**nhanced **O**pen **L**anguage **M**odel

Meet LeoLM-Mistral, the first open and commercially available German Foundation Language Model built on Mistral 7b. Our models extend their base models' capabilities into German through continued pretraining on a large corpus of German-language and mostly locality-specific text. Thanks to a compute grant at HessianAI's new supercomputer **42**, we release three foundation models trained with 8k context length: [`LeoLM/leo-mistral-hessianai-7b`](https://huggingface.co/LeoLM/leo-mistral-hessianai-7b) under Apache 2.0, and [`LeoLM/leo-hessianai-7b`](https://huggingface.co/LeoLM/leo-hessianai-7b) and [`LeoLM/leo-hessianai-13b`](https://huggingface.co/LeoLM/leo-hessianai-13b) under the [Llama-2 community license](https://huggingface.co/meta-llama/Llama-2-70b/raw/main/LICENSE.txt) (70b also coming soon! 👀). With this release, we hope to bring a new wave of opportunities to German open-source and commercial LLM research and accelerate adoption. Read our [blog post](https://laion.ai/blog/leo-lm/) or our paper (preprint coming soon) for more details!

*A project by Björn Plüster and Christoph Schuhmann in collaboration with LAION and HessianAI.*

## Model Details

- **Finetuned from:** [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
- **Model type:** Causal decoder-only transformer language model
- **Language:** English and German
- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0.html)
- **Contact:** [LAION Discord](https://discord.com/invite/eq3cAMZtCC) or [Björn Plüster](mailto:bjoern.pl@outlook.de)

## Use in 🤗Transformers

First install the direct dependencies:
```
pip install transformers torch accelerate
```
If you want faster inference using flash-attention2, you need to install these dependencies:
```bash
pip install packaging ninja
pip install flash-attn
```
Then load the model in transformers:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model = AutoModelForCausalLM.from_pretrained(
    "LeoLM/leo-mistral-hessianai-7b",  # the model id must be the first positional argument
    device_map="auto",
    torch_dtype=torch.bfloat16,
    use_flash_attention_2=True  # optional; requires flash-attn
)
```

## Training parameters

Note that for Mistral training, we changed the learning rate to `1e-5`, decaying to `1e-6`. We also used ZeRO stage 3 and the bfloat16 dtype.

![training_parameters](imgs/training_params.png "Training Hyperparameters")

## Benchmarks

![benchmarks](imgs/benchmarks.png "Benchmark Scores")
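As a complement to the loading snippet in the Usage section above, a short generation sketch that continues from the loaded `model`. The German prompt and the sampling settings are illustrative choices, not a recommended configuration:

```python
tokenizer = AutoTokenizer.from_pretrained("LeoLM/leo-mistral-hessianai-7b")

prompt = "Der Sinn des Lebens ist"  # illustrative prompt ("The meaning of life is")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=60, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```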
2,705
[ [ -0.0194854736328125, -0.046844482421875, 0.0074920654296875, 0.0374755859375, -0.0243072509765625, -0.0274200439453125, -0.005054473876953125, -0.04119873046875, -0.0011539459228515625, 0.02813720703125, -0.03472900390625, -0.030120849609375, -0.05322265625, ...
yikuan8/Clinical-BigBird
2022-04-10T17:40:08.000Z
[ "transformers", "pytorch", "big_bird", "fill-mask", "BigBird", "clinical", "en", "arxiv:2201.11838", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
yikuan8
null
null
yikuan8/Clinical-BigBird
9
665
transformers
2022-04-01T15:44:00
---
language: "en"
tags:
- BigBird
- clinical
---

<span style="font-size:larger;">**Clinical-BigBird**</span> is a clinical knowledge enriched version of BigBird that was further pre-trained using MIMIC-III clinical notes. It allows up to 4,096 tokens as the model input. Clinical-BigBird consistently outperforms ClinicalBERT across 10 baseline datasets. Those downstream experiments broadly cover named entity recognition (NER), question answering (QA), natural language inference (NLI) and text classification tasks. For more details, please refer to [our paper](https://arxiv.org/pdf/2201.11838.pdf).

We also provide a sister model, [Clinical-Longformer](https://huggingface.co/yikuan8/Clinical-Longformer).

### Pre-training

We initialized Clinical-BigBird from the pre-trained weights of the base version of BigBird. The pre-training process was distributed in parallel across 6 32GB Tesla V100 GPUs. FP16 precision was enabled to accelerate training. We pre-trained Clinical-BigBird for 300,000 steps with a batch size of 6×2. The learning rate was 3e-5. The entire pre-training process took more than 2 weeks.

### Usage

Load the model directly from Transformers:
```
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("yikuan8/Clinical-BigBird")
model = AutoModelForMaskedLM.from_pretrained("yikuan8/Clinical-BigBird")
```

### Citing

If you find our model helpful, please consider citing this :)
```
@article{li2022clinical,
  title={Clinical-Longformer and Clinical-BigBird: Transformers for long clinical sequences},
  author={Li, Yikuan and Wehbe, Ramsey M and Ahmad, Faraz S and Wang, Hanyin and Luo, Yuan},
  journal={arXiv preprint arXiv:2201.11838},
  year={2022}
}
```

### Questions

Please email yikuanli2018@u.northwestern.edu
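For a quick smoke test of the loaded masked-LM head, the checkpoint can also be exercised through the fill-mask pipeline. The clinical sentence is illustrative, and `[MASK]` is BigBird's standard mask token:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="yikuan8/Clinical-BigBird")
print(unmasker("The patient was administered [MASK] for the infection."))
```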
1,801
[ [ -0.00186920166015625, -0.0340576171875, 0.038299560546875, 0.0295257568359375, -0.01751708984375, -0.0185546875, -0.0084381103515625, -0.052001953125, 0.0293731689453125, 0.0258941650390625, -0.042022705078125, -0.037933349609375, -0.060699462890625, 0.02937...
ura-hcmut/ura-llama-7b
2023-10-09T05:33:01.000Z
[ "transformers", "safetensors", "llama", "text-generation", "vi", "en", "dataset:vietgpt/wikipedia_vi", "dataset:vietgpt/binhvq_news_vi", "arxiv:1910.09700", "license:other", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
ura-hcmut
null
null
ura-hcmut/ura-llama-7b
1
665
transformers
2023-10-05T08:03:19
---
license: other
datasets:
- vietgpt/wikipedia_vi
- vietgpt/binhvq_news_vi
language:
- vi
- en
pipeline_tag: text-generation
---

# URA-LLaMa

## Model Details

### Model Description

With a strong commitment to enhancing the quality of large language models for the Vietnamese language, a collaborative effort was undertaken by Vietnamese researchers from Ho Chi Minh University of Technology (HCMUT) - Vietnam National University HCMC and Stanford University. Our endeavor involved the meticulous fine-tuning of Meta LLaMa-2 models using Vietnamese articles sourced from Wikipedia and online news websites. In line with our dedication to fostering community progress, we are pleased to offer our models free of charge for research purposes. For those who wish to delve further into our research and its details, we encourage you to explore the comprehensive information provided below.

- **Developed by:**
    - Duc Q. Nguyen
    - Sang T. Truong
    - Toan D. V. Nguyen
    - Dong D. Le
    - Nhi N. Truong
    - Tho Quan
- **Model type:** Text generation
- **Languages:** Vietnamese, English
- **License:**
    - Custom license available at [LICENSE](https://github.com/martinakaduc/ura-llama-public/blob/main/URA-LLaMa%20Model%20User%20Agreement.pdf)
    - If you want to access our model, please fill in the above license and email us the scanned version at nqduc@hcmut.edu.vn (CC sttruong@cs.stanford.edu, qttho@hcmut.edu.vn)
- **Finetuned from model:** Meta LLaMa-2

### Model Sources

We publicly provide starter source code and access to the playground of URA-LLaMa 7B. The research paper is coming soon.

- **Repository:** [URA-LLaMa Github](https://github.com/martinakaduc/ura-llama-public)
- **Paper:** [Coming soon]
- **Demo:**
    - Huggingface Playground: [https://huggingface.co/spaces/ura-hcmut/ura-llama-playground](https://huggingface.co/spaces/ura-hcmut/ura-llama-playground)
    - URA Playground: [https://www.ura.hcmut.edu.vn/llama-vi/](https://www.ura.hcmut.edu.vn/llama-vi/)

## Uses

This model is primarily designed for text generation. However, as a language model, it is versatile and can also function as an encoder for various downstream tasks, akin to other models. For a detailed understanding of its use cases, please refer to the information provided below.

### Direct Use

You can use our models to perform various tasks, including:
* Question answering (with context)
* Summarization
* Language modelling
* Text classification
* Translation

### Downstream Use

This model can serve as an encoder for a wide range of downstream tasks, spanning from pure natural language processing to combinations of natural language processing with computer vision or speech processing.

### Out-of-Scope Use

While our models have undergone fine-tuning using extensive Vietnamese datasets, they may not perform optimally in specialized domains necessitating profound domain expertise, such as medicine, politics, chemistry, etc. We kindly request that you refrain from employing our models for political purposes or any endeavors that may cause harm to individuals or compromise the sovereignty and territorial integrity of Vietnam.

## Bias, Risks, and Limitations

Unless required by applicable law, the URA-LLaMa materials and any output and results therefrom are provided on an "as is" basis, without warranties of any kind, either express or implied, including, without limitation, any warranties of title, non-infringement, merchantability, or fitness for a particular purpose. You are solely responsible for determining the appropriateness of using or redistributing the URA-LLaMa materials and assume any risks associated with your use of the URA-LLaMa materials and any output and results.

### Recommendations

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. For the model to work well, you may need to perform prompt engineering to create appropriate prompts before inference.

## How to Get Started with the Model

Use the code below to get started with the model.

```python
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

pipeline_kwargs = {
    "temperature": 1.0,
    "max_new_tokens": 250,
    "top_k": 1,
    "repetition_penalty": 1.1
}

if __name__ == "__main__":
    # Load model
    model = AutoModelForCausalLM.from_pretrained(
        "ura-hcmut/ura-llama-7b",
        device_map="auto"
    )
    model.config.pretraining_tp = 1
    model.eval()

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        "ura-hcmut/ura-llama-7b",
        trust_remote_code=True
    )
    tokenizer.pad_token = tokenizer.eos_token

    pipeline = transformers.pipeline(
        model=model,
        tokenizer=tokenizer,
        return_full_text=False,
        task='text-generation',
        **pipeline_kwargs
    )

    query_template = "[INST] <<SYS>> Bạn là một trợ lý thông minh. <<SYS>> Hãy trả lời câu hỏi sau.\nCâu hỏi: {query}\nTrả lời:[/INST] "

    while True:
        query = input("Query: ")
        if query == "exit":
            break
        query = query_template.format(query=query)
        answer = pipeline(query)[0]["generated_text"]
        print(answer)
```

## Finetuning Details

### Finetuning Data

List of datasets used for finetuning:
* Vietnamese Wikipedia: [https://huggingface.co/datasets/vietgpt/wikipedia_vi](https://huggingface.co/datasets/vietgpt/wikipedia_vi)
* Binhvq News Corpus: [https://huggingface.co/datasets/vietgpt/binhvq_news_vi](https://huggingface.co/datasets/vietgpt/binhvq_news_vi)

### Finetuning Procedure

We utilize the causal language modelling (next token prediction) procedure to finetune our models. A tutorial is available at [https://huggingface.co/docs/transformers/tasks/language_modeling](https://huggingface.co/docs/transformers/tasks/language_modeling).

#### Finetuning Hyperparameters

- **Training regime:** BFloat16 mixed precision
- **Quantization:** Normal Float 4bit
- **LoRA rank:** 128
- **Batch size:** 120
- **Optimizer:** Paged AdamW 32bit
- **Learning rate:** 1e-5

## Evaluation

Our models are tested with various tasks. The details of the evaluation process are coming soon.

### Testing Data, Factors & Metrics

#### Testing Data

[Coming soon]

#### Factors

- Effects of prompt engineering
- Effects of few-shot learning
- Effects of chain-of-thought
- Effects of choice orders
- Ability to deal with typo mistakes (robustness)
- Ability to deal with unfair situations (fairness)

#### Metrics

[Coming soon]

### Results

[Coming soon]

#### Summary

[Coming soon]

## Environmental Impact

Carbon emissions are estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** 1 x A100 80GB
- **Hours used:** ~520h
- **Carbon Emitted:** ~90 kg CO2 eq.

## Citation

If you use URA-LLaMa materials in your research, please cite our model(s) as below.

**BibTeX:**

```plaintext
@online{ura-llama,
  author = {Duc Q. Nguyen and Sang T. Truong and Toan D. V. Nguyen and Dong D. Le and Nhi N. Truong and Tho Quan},
  title = {URA-LLaMa: UniveRsal Adapted Large Language Model for Vietnamese},
  year = 2023,
  url = {https://github.com/martinakaduc/ura-llama-public}
}
```

## Model Card Authors

## Contact

* Mr. Duc Q. Nguyen: nqduc@hcmut.edu.vn
* Mr. Sang T. Truong: sttruong@cs.stanford.edu
* Assoc. Prof. Tho Quan: qttho@hcmut.edu.vn
7,477
[ [ -0.01296234130859375, -0.06768798828125, 0.03204345703125, 0.0180816650390625, -0.033538818359375, -0.0216217041015625, -0.012786865234375, -0.02252197265625, 0.006946563720703125, 0.055816650390625, -0.0341796875, -0.05810546875, -0.04876708984375, 0.027114...
google/t5-efficient-small
2023-01-24T16:50:23.000Z
[ "transformers", "pytorch", "tf", "jax", "t5", "text2text-generation", "deep-narrow", "en", "dataset:c4", "arxiv:2109.10686", "license:apache-2.0", "autotrain_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
google
null
null
google/t5-efficient-small
3
664
transformers
2022-03-02T23:29:05
---
language:
- en
datasets:
- c4
tags:
- deep-narrow
inference: false
license: apache-2.0
---

# T5-Efficient-SMALL (Deep-Narrow version)

T5-Efficient-SMALL is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*.

In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper:

> We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased
> before considering any other forms of uniform scaling across other dimensions. This is largely due to
> how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a
> tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise,
> a tall base model might also generally more efficient compared to a large model. We generally find
> that, regardless of size, even if absolute performance might increase as we continue to stack layers,
> the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36
> layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e.,
> params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params,
> FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to
> consider.

To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block.

## Details model architecture

This model checkpoint - **t5-efficient-small** - is of model type **Small** with no variations. It has **60.52** million parameters and thus requires *ca.* **242.08 MB** of memory in full precision (*fp32*) or **121.04 MB** of memory in half precision (*fp16* or *bf16*).
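The parameter count above can be verified directly with a small sketch (the checkpoint is downloaded on first use):

```python
from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-small")
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.2f}M parameters")  # ca. 60.52M, matching the figure above
```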
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

whereas the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:
- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:
- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:
- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint. As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
6,305
[ [ -0.040008544921875, -0.04681396484375, 0.0259552001953125, 0.01183319091796875, -0.01474761962890625, 0.00006276369094848633, -0.01238250732421875, -0.03753662109375, 0.0022525787353515625, 0.024688720703125, -0.035675048828125, -0.0379638671875, -0.0625, 0....
cis-lmu/glot500-base
2023-09-18T19:54:32.000Z
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "fill-mask", "multilingual", "arxiv:2305.12182", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
cis-lmu
null
null
cis-lmu/glot500-base
8
664
transformers
2023-05-17T13:58:06
---
license: apache-2.0
language:
- multilingual
---

# Glot500 (base-sized model)

Glot500 model (Glot500-m) pre-trained on 500+ languages using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/pdf/2305.12182.pdf) (ACL 2023) and first released in [this repository](https://github.com/cisnlp/Glot500).

## Usage

You can use this model directly with a pipeline for masked language modeling:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='cis-lmu/glot500-base')
>>> unmasker("Hello I'm a <mask> model.")
```

Here is how to use this model to get the features of a given text in PyTorch:

```python
>>> from transformers import AutoTokenizer, AutoModelForMaskedLM

>>> tokenizer = AutoTokenizer.from_pretrained('cis-lmu/glot500-base')
>>> model = AutoModelForMaskedLM.from_pretrained("cis-lmu/glot500-base")

>>> # prepare input
>>> text = "Replace me by any text you'd like."
>>> encoded_input = tokenizer(text, return_tensors='pt')

>>> # forward pass
>>> output = model(**encoded_input)
```

### BibTeX entry and citation info

```bibtex
@article{imanigooghari-etal-2023-glot500,
  title={Glot500: Scaling Multilingual Corpora and Language Models to 500 Languages},
  author={ImaniGooghari, Ayyoob and Lin, Peiqin and Kargaran, Amir Hossein and Severini, Silvia and Jalili Sabet, Masoud and Kassner, Nora and Ma, Chunlan and Schmid, Helmut and Martins, Andr{\'e} and Yvon, Fran{\c{c}}ois and Sch{\"u}tze, Hinrich},
  journal={arXiv preprint arXiv:2305.12182},
  year={2023}
}
```

<!---
```bibtex
@inproceedings{imanigooghari-etal-2023-glot500,
  title = {Glot500: Scaling Multilingual Corpora and Language Models to 500 Languages},
  author = {ImaniGooghari, Ayyoob and Lin, Peiqin and Kargaran, Amir Hossein and Severini, Silvia and Jalili Sabet, Masoud and Kassner, Nora and Ma, Chunlan and Schmid, Helmut and Martins, Andr{\'e} and Yvon, Fran{\c{c}}ois and Sch{\"u}tze, Hinrich},
  year = 2023,
  month = jul,
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  publisher = {Association for Computational Linguistics},
  address = {Toronto, Canada},
  pages = {1082--1117},
  url = {https://aclanthology.org/2023.acl-long.61}
}
```
-->
2,384
[ [ -0.038482666015625, -0.0455322265625, 0.01091766357421875, 0.018890380859375, 0.00519561767578125, 0.0013170242309570312, -0.038909912109375, -0.031646728515625, 0.03387451171875, 0.036590576171875, -0.0379638671875, -0.03350830078125, -0.04254150390625, 0.0...
luoruipu1/Valley2-7b
2023-08-07T06:11:23.000Z
[ "transformers", "pytorch", "valley", "text-generation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
luoruipu1
null
null
luoruipu1/Valley2-7b
1
664
transformers
2023-08-07T04:58:33
---
license: apache-2.0
---

# Valley2

Valley2 is a multi-modal video assistant that replaces the large-language-model backbone of [Valley](https://valley-vl.github.io/) with [Llama 2](https://ai.meta.com/llama/), the latest model released by Meta. We currently offer Valley2-7B.
293
[ [ -0.0210418701171875, -0.06549072265625, 0.0297698974609375, 0.0196533203125, -0.0222320556640625, 0.01177978515625, 0.0203857421875, -0.0233306884765625, 0.0173797607421875, 0.04486083984375, -0.062286376953125, -0.017547607421875, -0.04632568359375, -0.0181...
xlm-roberta-large-finetuned-conll02-dutch
2022-07-22T08:07:08.000Z
[ "transformers", "pytorch", "rust", "xlm-roberta", "fill-mask", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "...
fill-mask
null
null
null
xlm-roberta-large-finetuned-conll02-dutch
1
663
transformers
2022-03-02T23:29:04
---
language:
- multilingual
- af
- am
- ar
- as
- az
- be
- bg
- bn
- br
- bs
- ca
- cs
- cy
- da
- de
- el
- en
- eo
- es
- et
- eu
- fa
- fi
- fr
- fy
- ga
- gd
- gl
- gu
- ha
- he
- hi
- hr
- hu
- hy
- id
- is
- it
- ja
- jv
- ka
- kk
- km
- kn
- ko
- ku
- ky
- la
- lo
- lt
- lv
- mg
- mk
- ml
- mn
- mr
- ms
- my
- ne
- nl
- no
- om
- or
- pa
- pl
- ps
- pt
- ro
- ru
- sa
- sd
- si
- sk
- sl
- so
- sq
- sr
- su
- sv
- sw
- ta
- te
- th
- tl
- tr
- ug
- uk
- ur
- uz
- vi
- xh
- yi
- zh
---

# xlm-roberta-large-finetuned-conll02-dutch

# Table of Contents

1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8. [Citation](#citation)
9. [Model Card Authors](#model-card-authors)
10. [How To Get Started With the Model](#how-to-get-started-with-the-model)

# Model Details

## Model Description

The XLM-RoBERTa model was proposed in [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019. It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data. This model is [XLM-RoBERTa-large](https://huggingface.co/xlm-roberta-large) fine-tuned with the [CoNLL-2002](https://huggingface.co/datasets/conll2002) dataset in Dutch.

- **Developed by:** See [associated paper](https://arxiv.org/abs/1911.02116)
- **Model type:** Multi-lingual language model
- **Language(s) (NLP):** XLM-RoBERTa is a multilingual model trained on 100 different languages; see [GitHub Repo](https://github.com/facebookresearch/fairseq/tree/main/examples/xlmr) for full list; model is fine-tuned on a dataset in Dutch
- **License:** More information needed
- **Related Models:** [RoBERTa](https://huggingface.co/roberta-base), [XLM](https://huggingface.co/docs/transformers/model_doc/xlm)
  - **Parent Model:** [XLM-RoBERTa-large](https://huggingface.co/xlm-roberta-large)
- **Resources for more information:**
  - [GitHub Repo](https://github.com/facebookresearch/fairseq/tree/main/examples/xlmr)
  - [Associated Paper](https://arxiv.org/abs/1911.02116)
  - [CoNLL-2002 data card](https://huggingface.co/datasets/conll2002)

# Uses

## Direct Use

The model is a language model. The model can be used for token classification, a natural language understanding task in which a label is assigned to some tokens in a text.

## Downstream Use

Potential downstream use cases include Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. To learn more about token classification and other potential downstream use cases, see the Hugging Face [token classification docs](https://huggingface.co/tasks/token-classification).

## Out-of-Scope Use

The model should not be used to intentionally create hostile or alienating environments for people.

# Bias, Risks, and Limitations

**CONTENT WARNING: Readers should be made aware that language generated by this model may be disturbing or offensive to some and may propagate historical and current stereotypes.**

Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)).

## Recommendations

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model.

# Training

See the following resources for training data and training procedure details:
- [XLM-RoBERTa-large model card](https://huggingface.co/xlm-roberta-large)
- [CoNLL-2002 data card](https://huggingface.co/datasets/conll2002)
- [Associated paper](https://arxiv.org/pdf/1911.02116.pdf)

# Evaluation

See the [associated paper](https://arxiv.org/pdf/1911.02116.pdf) for evaluation details.

# Environmental Impact

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** 500 32GB Nvidia V100 GPUs (from the [associated paper](https://arxiv.org/pdf/1911.02116.pdf))
- **Hours used:** More information needed
- **Cloud Provider:** More information needed
- **Compute Region:** More information needed
- **Carbon Emitted:** More information needed

# Technical Specifications

See the [associated paper](https://arxiv.org/pdf/1911.02116.pdf) for further details.

# Citation

**BibTeX:**

```bibtex
@article{conneau2019unsupervised,
  title={Unsupervised Cross-lingual Representation Learning at Scale},
  author={Conneau, Alexis and Khandelwal, Kartikay and Goyal, Naman and Chaudhary, Vishrav and Wenzek, Guillaume and Guzm{\'a}n, Francisco and Grave, Edouard and Ott, Myle and Zettlemoyer, Luke and Stoyanov, Veselin},
  journal={arXiv preprint arXiv:1911.02116},
  year={2019}
}
```

**APA:**
- Conneau, A., Khandelwal, K., Goyal, N., Chaudhary, V., Wenzek, G., Guzmán, F., ... & Stoyanov, V. (2019). Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.

# Model Card Authors

This model card was written by the team at Hugging Face.

# How to Get Started with the Model

Use the code below to get started with the model. You can use this model directly within a pipeline for NER.

<details>
<summary> Click to expand </summary>

```python
>>> from transformers import AutoTokenizer, AutoModelForTokenClassification
>>> from transformers import pipeline

>>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll02-dutch")
>>> model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll02-dutch")
>>> classifier = pipeline("ner", model=model, tokenizer=tokenizer)
>>> classifier("Mijn naam is Emma en ik woon in Londen.")
[{'end': 17,
  'entity': 'B-PER',
  'index': 4,
  'score': 0.9999807,
  'start': 13,
  'word': '▁Emma'},
 {'end': 36,
  'entity': 'B-LOC',
  'index': 9,
  'score': 0.9999871,
  'start': 32,
  'word': '▁Lond'}]
```

</details>
6,354
[ [ -0.0360107421875, -0.043487548828125, 0.00994110107421875, 0.0107269287109375, -0.0140380859375, -0.01415252685546875, -0.03363037109375, -0.044036865234375, 0.0119781494140625, 0.033447265625, -0.0300445556640625, -0.0400390625, -0.0604248046875, 0.00563049...
Helsinki-NLP/opus-mt-tc-big-en-gmq
2023-10-10T10:34:07.000Z
[ "transformers", "pytorch", "tf", "safetensors", "marian", "text2text-generation", "translation", "opus-mt-tc", "tc", "big", "en", "gmq", "license:cc-by-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
Helsinki-NLP
null
null
Helsinki-NLP/opus-mt-tc-big-en-gmq
1
662
transformers
2022-04-13T14:14:55
---
language:
- da
- en
- fo
- gmq
- is
- nb
- nn
- "no"
- sv

tags:
- translation
- opus-mt-tc

license: cc-by-4.0
model-index:
- name: opus-mt-tc-big-en-gmq
  results:
  - task:
      name: Translation eng-dan
      type: translation
      args: eng-dan
    dataset:
      name: flores101-devtest
      type: flores_101
      args: eng dan devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 47.7
  - task:
      name: Translation eng-isl
      type: translation
      args: eng-isl
    dataset:
      name: flores101-devtest
      type: flores_101
      args: eng isl devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 24.1
  - task:
      name: Translation eng-nob
      type: translation
      args: eng-nob
    dataset:
      name: flores101-devtest
      type: flores_101
      args: eng nob devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 34.5
  - task:
      name: Translation eng-swe
      type: translation
      args: eng-swe
    dataset:
      name: flores101-devtest
      type: flores_101
      args: eng swe devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 46.9
  - task:
      name: Translation eng-isl
      type: translation
      args: eng-isl
    dataset:
      name: newsdev2021.en-is
      type: newsdev2021.en-is
      args: eng-isl
    metrics:
    - name: BLEU
      type: bleu
      value: 22.6
  - task:
      name: Translation eng-dan
      type: translation
      args: eng-dan
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: eng-dan
    metrics:
    - name: BLEU
      type: bleu
      value: 61.6
  - task:
      name: Translation eng-isl
      type: translation
      args: eng-isl
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: eng-isl
    metrics:
    - name: BLEU
      type: bleu
      value: 39.9
  - task:
      name: Translation eng-nno
      type: translation
      args: eng-nno
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: eng-nno
    metrics:
    - name: BLEU
      type: bleu
      value: 40.1
  - task:
      name: Translation eng-nob
      type: translation
      args: eng-nob
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: eng-nob
    metrics:
    - name: BLEU
      type: bleu
      value: 57.3
  - task:
      name: Translation eng-swe
      type: translation
      args: eng-swe
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: eng-swe
    metrics:
    - name: BLEU
      type: bleu
      value: 60.9
  - task:
      name: Translation eng-isl
      type: translation
      args: eng-isl
    dataset:
      name: newstest2021.en-is
      type: wmt-2021-news
      args: eng-isl
    metrics:
    - name: BLEU
      type: bleu
      value: 21.5
---

# opus-mt-tc-big-en-gmq

Neural machine translation model for translating from English (en) to North Germanic languages (gmq).

This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to PyTorch using the transformers library by Hugging Face. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).

* Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.)

```
@inproceedings{tiedemann-thottingal-2020-opus,
    title = "{OPUS}-{MT} {--} Building open translation services for the World",
    author = {Tiedemann, J{\"o}rg  and Thottingal, Santhosh},
    booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation",
    month = nov,
    year = "2020",
    address = "Lisboa, Portugal",
    publisher = "European Association for Machine Translation",
    url = "https://aclanthology.org/2020.eamt-1.61",
    pages = "479--480",
}

@inproceedings{tiedemann-2020-tatoeba,
    title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}",
    author = {Tiedemann, J{\"o}rg},
    booktitle = "Proceedings of the Fifth Conference on Machine Translation",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.wmt-1.139",
    pages = "1174--1182",
}
```

## Model info

* Release: 2022-03-17
* source language(s): eng
* target language(s): dan fao isl nno nob nor swe
* valid target language labels: >>dan<< >>fao<< >>isl<< >>nno<< >>nob<< >>nor<< >>swe<<
* model: transformer-big
* data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))
* tokenization: SentencePiece (spm32k,spm32k)
* original model: [opusTCv20210807+bt_transformer-big_2022-03-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opusTCv20210807+bt_transformer-big_2022-03-17.zip)
* more information on released models: [OPUS-MT eng-gmq README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-gmq/README.md)
* more information about the model: [MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)

This is a multilingual translation model with multiple target languages. A sentence-initial language token is required in the form of `>>id<<` (id = valid target language ID), e.g. `>>dan<<`.

## Usage

A short example code:

```python
from transformers import MarianMTModel, MarianTokenizer

src_text = [
    ">>nno<< The United States borders Canada.",
    ">>nob<< This is the biggest hotel in this city."
]

model_name = "pytorch-models/opus-mt-tc-big-en-gmq"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))

for t in translated:
    print( tokenizer.decode(t, skip_special_tokens=True) )

# expected output:
#     USA grensar til Canada.
#     Dette er det største hotellet i denne byen.
```

You can also use OPUS-MT models with the transformers pipelines, for example:

```python
from transformers import pipeline
pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-gmq")
print(pipe(">>nno<< The United States borders Canada."))

# expected output: USA grensar til Canada.
```

## Benchmarks

* test set translations: [opusTCv20210807+bt_transformer-big_2022-03-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opusTCv20210807+bt_transformer-big_2022-03-17.test.txt)
* test set scores: [opusTCv20210807+bt_transformer-big_2022-03-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opusTCv20210807+bt_transformer-big_2022-03-17.eval.txt)
* benchmark results: [benchmark_results.txt](benchmark_results.txt)
* benchmark output: [benchmark_translations.zip](benchmark_translations.zip)

| langpair | testset | chr-F | BLEU | #sent | #words |
|----------|---------|-------|-------|-------|--------|
| eng-dan | tatoeba-test-v2021-08-07 | 0.75165 | 61.6 | 10795 | 79385 |
| eng-fao | tatoeba-test-v2021-08-07 | 0.40395 | 18.3 | 294 | 1933 |
| eng-isl | tatoeba-test-v2021-08-07 | 0.59731 | 39.9 | 2503 | 19023 |
| eng-nno | tatoeba-test-v2021-08-07 | 0.61271 | 40.1 | 460 | 3428 |
| eng-nob | tatoeba-test-v2021-08-07 | 0.72380 | 57.3 | 4539 | 36119 |
| eng-swe | tatoeba-test-v2021-08-07 | 0.74197 | 60.9 | 10362 | 68067 |
| eng-dan | flores101-devtest | 0.70810 | 47.7 | 1012 | 24638 |
| eng-isl | flores101-devtest | 0.52076 | 24.1 | 1012 | 22834 |
| eng-nob | flores101-devtest | 0.62760 | 34.5 | 1012 | 23873 |
| eng-swe | flores101-devtest | 0.70129 | 46.9 | 1012 | 23121 |
| eng-isl | newsdev2021.en-is | 0.50376 | 22.6 | 2004 | 43721 |
| eng-isl | newstest2021.en-is | 0.50516 | 21.5 | 1000 | 25233 |

## Acknowledgements

The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union's Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland.

## Model conversion info

* transformers version: 4.16.2
* OPUS-MT git hash: 3405783
* port time: Wed Apr 13 17:14:46 EEST 2022
* port machine: LM0-400-22516.local
9,270
[ [ -0.031951904296875, -0.040557861328125, 0.020660400390625, 0.0176849365234375, -0.03106689453125, -0.01235198974609375, -0.033233642578125, -0.02191162109375, 0.0162200927734375, 0.0271759033203125, -0.035125732421875, -0.05609130859375, -0.047607421875, 0.0...
recogna-nlp/ptt5-base-summ-xlsum
2023-03-22T03:41:42.000Z
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "pt", "pt-br", "summarization", "abstractive summarization", "dataset:csebuetnlp/xlsum", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
summarization
recogna-nlp
null
null
recogna-nlp/ptt5-base-summ-xlsum
12
662
transformers
2022-08-29T20:18:03
--- language: pt license: mit tags: - t5 - pytorch - pt - pt-br - summarization - abstractive summarization datasets: - csebuetnlp/xlsum inference: parameters: min_length: 32 max_length: 256 top_k: 5 widget: - text: 'O homem, Wilmer Antonio Marin, conhecido como Hugo, seria um alto comandante das Forças Armadas Revolucionárias da Colômbia (Farc), o maior grupo rebelde do país. Ele é acusado de ter perpetrado um ataque num clube noturno em fevereiro que matou 35 pessoas e feriu 160. Hugo também estaria envolvido no assassinato do empresário japonês Chikao Muramatsu que foi encontrado morto a tiros em novembro, quase três anos depois de ter sido seqüestrado. Golpe O resgate de US$ 19 milhões (R$ 55 milhões) tinha sido pedido para a libertação de Muramatsu. As autoridades colombianas acreditam que a detenção de Hugo representa um grande golpe na estrutura da Farc em Bogotá. Wilmer Antonio Marin é acusado de administrar uma rede de seqüestros que teria, como alvo, empresários ricos e estrangeiros. Ele seria reponsável por seqüestrá-los no meio da rua e levá-los para as montanhas onde a guerrilha tem suas bases.' example_title: "Notícia 1" - text: 'Terminou a rebelião de presos no Centro de Custódia de Presos de Justiça (CCPJ), em São Luís, no começo da tarde desta quarta-feira (17). Os presos entregaram as armas e a polícia faz uma revista dentro da unidade. O motim começou durante a festa do Dia das Crianças, realizada na terça-feira (16). As 16 crianças e 14 adultos foram libertados. Segundo informações da polícia, o líder da rebelião foi transferido para o Presídio de Pedrinhas, na capital maranhense. Os presos receberam garantias, por parte do diretor da unidade, de que não haveria represálias e novas transferências. Os presos tentaram fugir durante a festa, mas o plano foi descoberto. No começo da rebelião quatro pessoas ficaram feridas, entre elas uma auxiliar de enfermagem e um agente de polícia que trabalham no presídio. A unidade ficou sem luz e água e as negociações para a libertação dos reféns foi retomada na manhã desta quarta-feira. Segundo informações da polícia, os presos temiam uma transferência em massa depois de terem iniciado uma outra rebelião durante a greve de policiais no estado, na semana passada. A CCPJ tem capacidade para cerca de 80 presos, mas abriga 203 homens.' example_title: "Notícia 2" --- # Portuguese T5 for Abstractive Summarization (PTT5 Summ) ## Introduction PTT5 Summ is a fine-tuned [PTT5](https://github.com/unicamp-dl/PTT5) model to perform Abstractive Summarization in Brazilian Portuguese texts. This model was fine-tuned on the datasets: [WikiLingua](https://github.com/esdurmus/Wikilingua), [XL-Sum](https://github.com/csebuetnlp/xl-sum), [TeMário](http://www.nilc.icmc.usp.br/nilc/download/NILCTR0706-MazieroEtAl(2).pdf) and [CSTNews](http://nilc.icmc.usp.br/CSTNews/login/?next=/CSTNews/). For further information, please go to [PTT5 Summ repository](https://github.com/pedropaiola/ptt5-summ). ## Available models | Model | Dataset used in fine-tuning| | :-: | :-: | | [phpaiola/ptt5-base-summ-wikilingua](https://huggingface.co/phpaiola/ptt5-base-summ-wikilingua) | WikiLingua | | [phpaiola/ptt5-base-summ-xlsum](https://huggingface.co/phpaiola/ptt5-base-summ-xlsum) | XL-Sum | | [phpaiola/ptt5-base-summ-temario](https://huggingface.co/phpaiola/ptt5-base-summ-temario) | 1st phase: WikiLingua. 2nd phase: TeMario | | [phpaiola/ptt5-base-summ-cstnews](https://huggingface.co/phpaiola/ptt5-base-summ-cstnews) | 1st phase: WikiLingua. 
2nd phase: CSTNews|

## Usage example

```python
# Tokenizer
from transformers import T5Tokenizer

# PyTorch model
from transformers import T5ForConditionalGeneration

token_name = 'unicamp-dl/ptt5-base-portuguese-vocab'
model_name = 'phpaiola/ptt5-base-summ-xlsum'

tokenizer = T5Tokenizer.from_pretrained(token_name)
model_pt = T5ForConditionalGeneration.from_pretrained(model_name)

text = '''
“A tendência de queda da taxa de juros no Brasil é real, é visível”, disse Meirelles, que participou na capital americana de uma série de reuniões e encontros com banqueiros e investidores que aconteceram paralelamente às reuniões do Fundo Monetário Internacional (FMI) e do Banco Mundial (Bird) no fim de semana. Para o presidente do BC, a atual política econômica do governo e a manutenção da taxa de inflação dentro da meta são fatores que garantem queda na taxa de juros a longo prazo. “Mas é importante que nós não olhemos para isso apenas no curto prazo. Temos que olhar no médio e longo prazos”, disse Meirelles. Para ele, o trabalho que o Banco Central tem feito para conter a inflação dentro da meta vai gerar queda gradual da taxa de juros. BC do ano Neste domingo, Meirelles participou da cerimônia de entrega do prêmio “Banco Central do ano”, oferecido pela revista The Banker à instituição que preside. “Este é um sinal importante de reconhecimento do nosso trabalho, de que o Brasil está indo na direção correta”, disse ele. Segundo Meirelles, o Banco Central do Brasil está sendo percebido como uma instituição comprometida com a meta de inflação. “Isso tem um ganho importante, na medida em que os agentes formadores de preços começam a apostar que a inflação vai estar na meta, que isso é levado a sério no Brasil”, completou. O presidente do Banco Central disse ainda que a crise política brasileira não foi um assunto de interesse prioritário dos investidores que encontrou no fim de semana.
'''

inputs = tokenizer.encode(text, max_length=512, truncation=True, return_tensors='pt')
summary_ids = model_pt.generate(inputs, max_length=256, min_length=32, num_beams=5, no_repeat_ngram_size=3, early_stopping=True)
summary = tokenizer.decode(summary_ids[0])
print(summary)
#<pad> O presidente do Banco Central, Henrique Meirelles, disse neste domingo, em Washington, que a taxa de juros no Brasil é real, mas que o Brasil está indo na direção correta.</s>
```

# Citation

```bibtex
@InProceedings{ptt5summ_bracis,
    author="Paiola, Pedro H. and de Rosa, Gustavo H. and Papa, Jo{\~a}o P.",
    editor="Xavier-Junior, Jo{\~a}o Carlos and Rios, Ricardo Ara{\'u}jo",
    title="Deep Learning-Based Abstractive Summarization for Brazilian Portuguese Texts",
    booktitle="BRACIS 2022: Intelligent Systems",
    year="2022",
    publisher="Springer International Publishing",
    address="Cham",
    pages="479--493",
    isbn="978-3-031-21689-3"
}
```
6,711
[ [ -0.039154052734375, -0.0423583984375, 0.00894927978515625, 0.0343017578125, -0.051422119140625, 0.01226806640625, -0.0195770263671875, -0.037872314453125, 0.0241851806640625, 0.029083251953125, -0.0177154541015625, -0.06256103515625, -0.054168701171875, 0.02...
proximasanfinetuning/luna-diffusion
2023-05-11T21:09:13.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "painterly", "painting", "license:other", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
proximasanfinetuning
null
null
proximasanfinetuning/luna-diffusion
45
662
diffusers
2023-03-06T17:43:13
---
license: other
library_name: diffusers
pipeline_tag: text-to-image
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- painterly
- painting
- diffusers
inference: false
---

[<img src="https://huggingface.co/proximasanfinetuning/luna-diffusion/resolve/main/cover%232.jpg">](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/cover%232.jpg)

# → about

- this was finetuned on a few hundred & mostly hand-captioned highres images on SD 1.5 for ethereal, painterly vibes
- no trigger words/ tokens, but you *can* add "painting" to the prompt to increase the painterly effect
- use "illustration" in prompts to get more vector art looking images
- works best at 768x768 px, 512x768 px or 768x512 px since it was finetuned on 768x768, so 512x512 will look overbaked
- DPM++ 2M usually looks nice and crisp, use Euler_a for a softer look
- i recommend adding “nude, naked” to your negative prompt if you don’t like boobas because this model certainly does (¬‿¬ )
- check my [blog entry](https://proximacentaurib.xyz/checkpoints/luna-diffusion/) for more examples, comparisons and tips on settings

---

[<img src="https://colab.research.google.com/assets/colab-badge.svg">](https://colab.research.google.com/drive/1ML9E3963yyMlyspmZUXcbXqPXB1g7j5x?usp=sharing)

# 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion Pipeline](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "proximasanfinetuning/luna-diffusion"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "painting of a beautiful woman with red hair, 8k, high quality"
image = pipe(prompt, height=768, width=768).images[0]

image.save("./result.jpg")
```

# you can also get it as [CKPT](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/luna_diffusion_2-2.ckpt) or [Safetensors](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/luna_diffusion_2-2.safetensors)

----

# → some great images users on [stablecog.com](https://stablecog.com) made with it:

[<img src="https://huggingface.co/proximasanfinetuning/luna-diffusion/resolve/main/stablecog-samples.png">](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/stablecog-samples.png)

Links: [1](https://stablecog.com/gallery?output=b1be8a4b-5d56-4443-beef-e4468ba7f800) [2](https://stablecog.com/gallery?output=8e9eb1cc-5e18-4650-b15f-c6912c421c9c) [3](https://stablecog.com/gallery?output=9a291259-471a-4a32-b565-eac352141480) [4](https://stablecog.com/gallery?output=3431ade8-2c21-438b-b4c8-d9c8b129014c) [5](https://stablecog.com/gallery?output=5cd5330e-eeb3-4db3-9cd4-0fc06fad038e) [6](https://stablecog.com/gallery?output=40748472-5e85-4e33-86a7-5fcb6edd9506) [7](https://stablecog.com/gallery?output=6559d772-bdde-431b-97f5-1de26b780ad4) [8](https://stablecog.com/gallery?output=2414bacc-8025-4e09-b91a-9826eeb34045) [9](https://stablecog.com/gallery?output=28af6f65-d5d3-4fd2-ac89-aa77778999d9)

or check the [hashtag on twitter](https://twitter.com/search?q=%23lunadiffusion&src=typed_query&f=live)

----

# → finetuned to work well with specifying various skintones

[<img src="https://huggingface.co/proximasanfinetuning/luna-diffusion/resolve/main/%2314.jpg">](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/%2314.jpg)
[<img src="https://huggingface.co/proximasanfinetuning/luna-diffusion/resolve/main/%2315.jpg">](https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/%2315.jpg)

----

if you enjoy this consider buying me a coffee or becoming a monthly supporter (ノ◕ヮ◕)ノ*:・゚✧

<a href='https://ko-fi.com/S6S6FUYKY' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi3.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

----

# license

This model is licensed under a modified CreativeML OpenRAIL-M license.

* Utilizing and hosting the Luna Diffusion model and its derivatives on platforms that earn, will earn, or plan to earn revenue or donations requires prior authorization. **To request permission, please email proximasan@protonmail.com.**
* You are permitted to host the model card and files on both commercial and non-commercial websites, apps, etc. as long as you properly credit the model by stating its full name and providing a link to the model card (https://huggingface.co/proximasanfinetuning/luna-diffusion), without performing any actual inference or finetuning.
* The Luna Diffusion model and its derivatives can be hosted on non-commercial websites, apps, etc. as long as no revenue or donations are received. Proper credit must be given by stating the full model name and including a link to the model card (https://huggingface.co/proximasanfinetuning/luna-diffusion).
* **The outputs of the model or its derivatives can be used for commercial purposes as long as the usage is limited to teams of 10 or fewer individuals.**
* You can't use the model to deliberately produce or share illegal or harmful outputs or content
* The author claims no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
* You may re-distribute the weights. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the modified CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully)

Please read the full license here: https://huggingface.co/proximasanfinetuning/luna-diffusion/blob/main/luna_diffusion_license.txt
5,819
[ [ -0.04803466796875, -0.04803466796875, 0.040069580078125, 0.044342041015625, -0.0098724365234375, -0.00496673583984375, 0.016204833984375, -0.03857421875, 0.059173583984375, 0.046783447265625, -0.051300048828125, -0.054107666015625, -0.041656494140625, 0.0034...
timm/caformer_s36.sail_in22k_ft_in1k_384
2023-05-05T05:53:48.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-22k", "arxiv:2210.13452", "license:apache-2.0", "has_space", "region:us" ]
image-classification
timm
null
null
timm/caformer_s36.sail_in22k_ft_in1k_384
0
662
timm
2023-05-05T05:53:10
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
- imagenet-22k
---
# Model card for caformer_s36.sail_in22k_ft_in1k_384

A CAFormer (a MetaFormer) image classification model. Pretrained on ImageNet-22k and fine-tuned on ImageNet-1k by paper authors.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 39.3
  - GMACs: 26.1
  - Activations (M): 150.3
  - Image size: 384 x 384
- **Papers:**
  - Metaformer baselines for vision: https://arxiv.org/abs/2210.13452
- **Original:** https://github.com/sail-sg/metaformer
- **Dataset:** ImageNet-1k
- **Pretrain Dataset:** ImageNet-22k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('caformer_s36.sail_in22k_ft_in1k_384', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'caformer_s36.sail_in22k_ft_in1k_384',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 96, 96])
    #  torch.Size([1, 128, 48, 48])
    #  torch.Size([1, 320, 24, 24])
    #  torch.Size([1, 512, 12, 12])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'caformer_s36.sail_in22k_ft_in1k_384',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 512, 12, 12) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).
## Citation ```bibtex @article{yu2022metaformer_baselines, title={Metaformer baselines for vision}, author={Yu, Weihao and Si, Chenyang and Zhou, Pan and Luo, Mi and Zhou, Yichen and Feng, Jiashi and Yan, Shuicheng and Wang, Xinchao}, journal={arXiv preprint arXiv:2210.13452}, year={2022} } ```
3,774
[ [ -0.040802001953125, -0.0277099609375, 0.007450103759765625, 0.0113372802734375, -0.0281982421875, -0.0250091552734375, -0.0172119140625, -0.032012939453125, 0.016632080078125, 0.0364990234375, -0.0394287109375, -0.057159423828125, -0.05615234375, -0.00708007...
Graphcore/mt5-small-ipu
2023-07-13T11:20:36.000Z
[ "optimum_graphcore", "arxiv:1910.10683", "arxiv:2010.11934", "license:apache-2.0", "region:us" ]
null
Graphcore
null
null
Graphcore/mt5-small-ipu
0
662
null
2023-05-19T15:01:28
---
license: apache-2.0
---
# Graphcore/mt5-small-ipu

Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on Graphcore’s IPUs - a completely new kind of massively parallel processor to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore).

Through Hugging Face Optimum, Graphcore released ready-to-use IPU-trained model checkpoints and IPU configuration files to make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug-and-play any public dataset, and it allows seamless integration with our state-of-the-art hardware, giving you a quicker time-to-value for your AI project.

## Model description

Multilingual Text-to-Text Transfer Transformer (mT5) is the multilingual variant of [T5](https://arxiv.org/abs/1910.10683). T5 is a Transformer-based model that uses a text-to-text approach for translation, question answering, and classification. It introduces a unified framework that converts all text-based language problems into a text-to-text format for transfer learning in NLP, which allows the same model, loss function, hyperparameters, etc. to be used across a diverse set of tasks.

mT5 is pretrained on the mC4 corpus, covering 101 languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Azerbaijani, Basque, Belarusian, Bengali, Bulgarian, Burmese, Catalan, Cebuano, Chichewa, Chinese, Corsican, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Haitian Creole, Hausa, Hawaiian, Hebrew, Hindi, Hmong, Hungarian, Icelandic, Igbo, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish, Kyrgyz, Lao, Latin, Latvian, Lithuanian, Luxembourgish, Macedonian, Malagasy, Malay, Malayalam, Maltese, Maori, Marathi, Mongolian, Nepali, Norwegian, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Samoan, Scottish Gaelic, Serbian, Shona, Sindhi, Sinhala, Slovak, Slovenian, Somali, Sotho, Spanish, Sundanese, Swahili, Swedish, Tajik, Tamil, Telugu, Thai, Turkish, Ukrainian, Urdu, Uzbek, Vietnamese, Welsh, West Frisian, Xhosa, Yiddish, Yoruba, Zulu.

Note: mT5 was only pre-trained on mC4, excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task.

Paper link: [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934)

## Intended uses & limitations

This model contains just the `IPUConfig` files for running the mT5 Small model (e.g. [HuggingFace/google/mt5-small](https://huggingface.co/google/mt5-small)) on Graphcore IPUs.

**This model contains no model weights, only an IPUConfig.**

## Usage

```python
from optimum.graphcore import IPUConfig
ipu_config = IPUConfig.from_pretrained("Graphcore/mt5-small-ipu")
```
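Going a step further, here is a minimal fine-tuning sketch, not an official Graphcore example: it assumes the `IPUTrainer` and `IPUTrainingArguments` classes from `optimum.graphcore` mirror the Transformers `Trainer` API and accept the `IPUConfig` via an `ipu_config` argument, and that an IPU system is available. The toy dataset and hyperparameters are illustrative placeholders.

```python
# Minimal fine-tuning sketch (assumed usage, not an official example).
from datasets import Dataset
from optimum.graphcore import IPUConfig, IPUTrainer, IPUTrainingArguments
from transformers import AutoTokenizer, DataCollatorForSeq2Seq, MT5ForConditionalGeneration

ipu_config = IPUConfig.from_pretrained("Graphcore/mt5-small-ipu")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")

# Toy seq2seq dataset: a single (source, target) pair, tokenized for mT5.
raw = Dataset.from_dict({"source": ["Hello, world!"], "target": ["Hallo, Welt!"]})

def preprocess(example):
    model_inputs = tokenizer(example["source"], max_length=64, truncation=True)
    labels = tokenizer(text_target=example["target"], max_length=64, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

train_ds = raw.map(preprocess, remove_columns=["source", "target"])

trainer = IPUTrainer(
    model=model,
    ipu_config=ipu_config,  # assumption: IPUTrainer takes the IPUConfig here
    args=IPUTrainingArguments(output_dir="./mt5-small-ipu-out", num_train_epochs=1),
    train_dataset=train_ds,
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
)
trainer.train()
```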
3,196
[ [ -0.0457763671875, -0.031524658203125, 0.0158233642578125, 0.0119476318359375, -0.0242156982421875, 0.0201568603515625, -0.018218994140625, -0.031890869140625, -0.004970550537109375, 0.03375244140625, -0.04937744140625, -0.048187255859375, -0.061370849609375, ...
frankjoshua/control_v11p_sd15_openpose
2023-04-24T22:26:20.000Z
[ "diffusers", "art", "controlnet", "stable-diffusion", "controlnet-v1-1", "image-to-image", "arxiv:2302.05543", "license:openrail", "diffusers:ControlNetModel", "region:us" ]
image-to-image
frankjoshua
null
null
frankjoshua/control_v11p_sd15_openpose
0
662
diffusers
2023-07-28T00:22:12
--- license: openrail base_model: runwayml/stable-diffusion-v1-5 tags: - art - controlnet - stable-diffusion - controlnet-v1-1 - image-to-image duplicated_from: ControlNet-1-1-preview/control_v11p_sd15_openpose --- # Controlnet - v1.1 - *openpose Version* **Controlnet v1.1** is the successor model of [Controlnet v1.0](https://huggingface.co/lllyasviel/ControlNet) and was released in [lllyasviel/ControlNet-v1-1](https://huggingface.co/lllyasviel/ControlNet-v1-1) by [Lvmin Zhang](https://huggingface.co/lllyasviel). This checkpoint is a conversion of [the original checkpoint](https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_openpose.pth) into `diffusers` format. It can be used in combination with **Stable Diffusion**, such as [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5). For more details, please also have a look at the [🧨 Diffusers docs](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/controlnet). ControlNet is a neural network structure to control diffusion models by adding extra conditions. ![img](./sd.png) This checkpoint corresponds to the ControlNet conditioned on **openpose images**. ## Model Details - **Developed by:** Lvmin Zhang, Maneesh Agrawala - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based. - **Resources for more information:** [GitHub Repository](https://github.com/lllyasviel/ControlNet), [Paper](https://arxiv.org/abs/2302.05543). - **Cite as:** @misc{zhang2023adding, title={Adding Conditional Control to Text-to-Image Diffusion Models}, author={Lvmin Zhang and Maneesh Agrawala}, year={2023}, eprint={2302.05543}, archivePrefix={arXiv}, primaryClass={cs.CV} } ## Introduction Controlnet was proposed in [*Adding Conditional Control to Text-to-Image Diffusion Models*](https://arxiv.org/abs/2302.05543) by Lvmin Zhang, Maneesh Agrawala. The abstract reads as follows: *We present a neural network structure, ControlNet, to control pretrained large diffusion models to support additional input conditions. The ControlNet learns task-specific conditions in an end-to-end way, and the learning is robust even when the training dataset is small (< 50k). Moreover, training a ControlNet is as fast as fine-tuning a diffusion model, and the model can be trained on a personal devices. Alternatively, if powerful computation clusters are available, the model can scale to large amounts (millions to billions) of data. We report that large diffusion models like Stable Diffusion can be augmented with ControlNets to enable conditional inputs like edge maps, segmentation maps, keypoints, etc. This may enrich the methods to control large diffusion models and further facilitate related applications.* ## Example It is recommended to use the checkpoint with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) as the checkpoint has been trained on it. 
Experimentally, the checkpoint can be used with other diffusion models such as dreamboothed stable diffusion. **Note**: If you want to process an image to create the auxiliary conditioning, external dependencies are required as shown below: 1. Install https://github.com/patrickvonplaten/controlnet_aux ```sh $ pip install controlnet_aux==0.3.0 ``` 2. Let's install `diffusers` and related packages: ``` $ pip install diffusers transformers accelerate ``` 3. Run code: ```python import torch import os from huggingface_hub import HfApi from pathlib import Path from diffusers.utils import load_image from PIL import Image import numpy as np from controlnet_aux import OpenposeDetector from diffusers import ( ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler, ) checkpoint = "lllyasviel/control_v11p_sd15_openpose" image = load_image( "https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/input.png" ) prompt = "chef in the kitchen" processor = OpenposeDetector.from_pretrained('lllyasviel/ControlNet') control_image = processor(image, hand_and_face=True) control_image.save("./images/control.png") controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() generator = torch.manual_seed(0) image = pipe(prompt, num_inference_steps=30, generator=generator, image=control_image).images[0] image.save('images/image_out.png') ``` ![bird](./images/input.png) ![bird_canny](./images/control.png) ![bird_canny_out](./images/image_out.png) ## Other released checkpoints v1-1 The authors released 14 different checkpoints, each trained with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) on a different type of conditioning: | Model Name | Control Image Overview| Control Image Example | Generated Image Example | |---|---|---|---| |[lllyasviel/control_v11p_sd15_canny](https://huggingface.co/lllyasviel/control_v11p_sd15_canny)<br/> *Trained with canny edge detection* | A monochrome image with white edges on a black background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11e_sd15_ip2p](https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p)<br/> *Trained with pixel to pixel instruction* | No condition .|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint)<br/> Trained with image inpainting | No condition.|<a 
href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"/></a>| |[lllyasviel/control_v11p_sd15_mlsd](https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd)<br/> Trained with multi-level line segment detection | An image with annotated line segments.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11f1p_sd15_depth](https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth)<br/> Trained with depth estimation | An image with depth information, usually represented as a grayscale image.|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_normalbae](https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae)<br/> Trained with surface normal estimation | An image with surface normal information, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_seg](https://huggingface.co/lllyasviel/control_v11p_sd15_seg)<br/> Trained with image segmentation | An image with segmented regions, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_lineart](https://huggingface.co/lllyasviel/control_v11p_sd15_lineart)<br/> Trained with line art generation | An image with line art, usually black lines on a white background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"><img width="64" 
style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15s2_lineart_anime](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> Trained with anime line art generation | An image with anime-style line art.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_openpose](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> Trained with human pose estimation | An image with human poses, usually represented as a set of keypoints or skeletons.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_scribble](https://huggingface.co/lllyasviel/control_v11p_sd15_scribble)<br/> Trained with scribble-based image generation | An image with scribbles, usually random or user-drawn strokes.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11p_sd15_softedge](https://huggingface.co/lllyasviel/control_v11p_sd15_softedge)<br/> Trained with soft edge image generation | An image with soft edges, usually to create a more painterly or artistic effect.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"/></a>| |[lllyasviel/control_v11e_sd15_shuffle](https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle)<br/> Trained with image shuffling | An image with shuffled patches or regions.|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" 
src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"/></a>| ## Improvements in Openpose 1.1: - The improvement of this model is mainly based on our improved implementation of OpenPose. We carefully reviewed the difference between the pytorch OpenPose and CMU's c++ openpose. Now the processor should be more accurate, especially for hands. The improvement of processor leads to the improvement of Openpose 1.1. - More inputs are supported (hand and face). - The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases. ## More information For more information, please also have a look at the [Diffusers ControlNet Blog Post](https://huggingface.co/blog/controlnet) and have a look at the [official docs](https://github.com/lllyasviel/ControlNet-v1-1-nightly).
15,690
[ [ -0.0450439453125, -0.04327392578125, 0.009429931640625, 0.04217529296875, -0.017059326171875, -0.021484375, 0.002819061279296875, -0.04058837890625, 0.03961181640625, 0.0222625732421875, -0.0555419921875, -0.0269622802734375, -0.05462646484375, -0.0107803344...
HooshvareLab/roberta-fa-zwnj-base
2023-02-15T19:52:24.000Z
[ "transformers", "pytorch", "tf", "jax", "roberta", "fill-mask", "fa", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
HooshvareLab
null
null
HooshvareLab/roberta-fa-zwnj-base
1
661
transformers
2022-03-02T23:29:04
---
language: fa
license: apache-2.0
---

# Roberta

This model can handle the zero-width non-joiner (ZWNJ) character used in Persian writing. In addition, the model was trained on new, multi-type corpora with a new vocabulary set.

## Questions?

Post a Github issue on the [ParsRoBERTa Issues](https://github.com/hooshvare/parsbert/issues) repo.
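For a quick smoke test, the model can be loaded with the standard fill-mask pipeline, along the lines of the sketch below; the Persian example sentence (which deliberately contains a ZWNJ) is illustrative, not from the authors.

```python
from transformers import pipeline

# Fill-mask sketch; the Persian example sentence (note the ZWNJ inside
# "هوش‌واره") is illustrative and not taken from the authors' materials.
fill_mask = pipeline("fill-mask", model="HooshvareLab/roberta-fa-zwnj-base")

predictions = fill_mask("ما در هوش‌واره معتقدیم با انتقال صحیح دانش و آگاهی، همه افراد می‌توانند از <mask> استفاده کنند.")
for p in predictions:
    print(p["token_str"], round(p["score"], 4))
```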
332
[ [ -0.01537322998046875, -0.0540771484375, 0.053558349609375, -0.0034027099609375, -0.018096923828125, 0.006622314453125, -0.0196533203125, -0.0196533203125, 0.0238800048828125, 0.060150146484375, -0.047088623046875, -0.0445556640625, -0.032684326171875, 0.0061...
ancs21/xlm-roberta-large-vi-qa
2021-09-21T16:01:14.000Z
[ "transformers", "pytorch", "xlm-roberta", "question-answering", "vi", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
question-answering
ancs21
null
null
ancs21/xlm-roberta-large-vi-qa
4
660
transformers
2022-03-02T23:29:05
---
language: vi
tags:
- vi
- xlm-roberta
widget:
- text: Toà nhà nào cao nhất Việt Nam?
  context: Landmark 81 là một toà nhà chọc trời trong tổ hợp dự án Vinhomes Tân Cảng, một dự án có tổng mức đầu tư 40.000 tỷ đồng, do Công ty Cổ phần Đầu tư xây dựng Tân Liên Phát thuộc Vingroup làm chủ đầu tư. Toà tháp cao 81 tầng, hiện tại là toà nhà cao nhất Việt Nam và là toà nhà cao nhất Đông Nam Á từ tháng 3 năm 2018.
license: mit
metrics:
- f1
- em
---

# XLM-RoBERTa large for QA on the Vietnamese language (also supports various languages)

## Overview

- Language model: xlm-roberta-large
- Fine-tuned from: [deepset/xlm-roberta-large-squad2](https://huggingface.co/deepset/xlm-roberta-large-squad2)
- Language: Vietnamese
- Downstream task: Extractive QA
- Dataset: [mailong25/bert-vietnamese-question-answering](https://github.com/mailong25/bert-vietnamese-question-answering/tree/master/dataset)
- Training data: train-v2.0.json (SQuAD 2.0 format)
- Eval data: dev-v2.0.json (SQuAD 2.0 format)
- Infrastructure: 1x Tesla P100 (Google Colab)

## Performance

Evaluated on dev-v2.0.json

```
exact: 136 / 141
f1: 0.9692671394799054
```

Evaluated on Vietnamese XQuAD: [xquad.vi.json](https://github.com/deepmind/xquad/blob/master/xquad.vi.json)

```
exact: 604 / 1190
f1: 0.7224454217571596
```

## Author

An Pham (ancs21.ps [at] gmail.com)

## License

MIT
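As a quick-start sketch, the model can be used with the `transformers` question-answering pipeline, reusing the question and context from the widget above:

```python
from transformers import pipeline

# Question-answering sketch using the widget example from this card.
qa = pipeline("question-answering", model="ancs21/xlm-roberta-large-vi-qa")

context = (
    "Landmark 81 là một toà nhà chọc trời trong tổ hợp dự án Vinhomes Tân Cảng, "
    "một dự án có tổng mức đầu tư 40.000 tỷ đồng, do Công ty Cổ phần Đầu tư xây dựng "
    "Tân Liên Phát thuộc Vingroup làm chủ đầu tư. Toà tháp cao 81 tầng, hiện tại là "
    "toà nhà cao nhất Việt Nam và là toà nhà cao nhất Đông Nam Á từ tháng 3 năm 2018."
)

result = qa(question="Toà nhà nào cao nhất Việt Nam?", context=context)
print(result["answer"], result["score"])
```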
1,371
[ [ -0.02520751953125, -0.05743408203125, 0.044830322265625, 0.0288238525390625, -0.01488494873046875, 0.026153564453125, -0.02606201171875, -0.01413726806640625, -0.0015058517456054688, 0.047454833984375, -0.04937744140625, -0.0537109375, -0.0439453125, 0.01124...
savasy/bert-base-turkish-squad
2023-06-22T14:43:05.000Z
[ "transformers", "pytorch", "jax", "safetensors", "bert", "question-answering", "tr", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
question-answering
savasy
null
null
savasy/bert-base-turkish-squad
10
660
transformers
2022-03-02T23:29:05
---
language: tr
---

# Turkish SQuAD Model: Question Answering

I fine-tuned the Turkish BERT model for the question-answering task on TQuAD, the Turkish version of SQuAD.

* BERT-base: https://huggingface.co/dbmdz/bert-base-turkish-uncased
* TQuAD dataset: https://github.com/TQuad/turkish-nlp-qa-dataset

# Training Code

```
!python3 run_squad.py \
  --model_type bert \
  --model_name_or_path dbmdz/bert-base-turkish-uncased \
  --do_train \
  --do_eval \
  --train_file trainQ.json \
  --predict_file dev1.json \
  --per_gpu_train_batch_size 12 \
  --learning_rate 3e-5 \
  --num_train_epochs 5.0 \
  --max_seq_length 384 \
  --doc_stride 128 \
  --output_dir "./model"
```

# Example Usage

> Load Model

```
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("savasy/bert-base-turkish-squad")
model = AutoModelForQuestionAnswering.from_pretrained("savasy/bert-base-turkish-squad")
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
```

> Apply the model

```
sait="ABASIYANIK, Sait Faik. Hikayeci (Adapazarı 23 Kasım 1906-İstanbul 11 Mayıs 1954). \
İlk öğrenimine Adapazarı’nda Rehber-i Terakki Mektebi’nde başladı. İki yıl kadar Adapazarı İdadisi’nde okudu.\
İstanbul Erkek Lisesi’nde devam ettiği orta öğrenimini Bursa Lisesi’nde tamamladı (1928). İstanbul Edebiyat \
Fakültesi’ne iki yıl devam ettikten sonra babasının isteği üzerine iktisat öğrenimi için İsviçre’ye gitti. \
Kısa süre sonra iktisat öğrenimini bırakarak Lozan’dan Grenoble’a geçti. Üç yıl başıboş bir edebiyat öğrenimi \
gördükten sonra babası tarafından geri çağrıldı (1933). Bir müddet Halıcıoğlu Ermeni Yetim Mektebi'nde Türkçe \
gurup dersleri öğretmenliği yaptı. Ticarete atıldıysa da tutunamadı. Bir ay Haber gazetesinde adliye muhabirliği\
yaptı (1942). Babasının ölümü üzerine aileden kalan emlakin geliri ile avare bir hayata başladı. Evlenemedi.\
Yazları Burgaz adasındaki köşklerinde, kışları Şişli’deki apartmanlarında annesi ile beraber geçen bu fazla \
içkili bohem hayatı ömrünün sonuna kadar sürdü."

print(nlp(question="Ne zaman avare bir hayata başladı?", context=sait))
print(nlp(question="Sait Faik hangi Lisede orta öğrenimini tamamladı?", context=sait))
```

```
# Ask yourself! Type your question
print(nlp(question="...?", context=sait))
```

Check my other models: https://huggingface.co/savasy
2,397
[ [ -0.044952392578125, -0.06829833984375, 0.021087646484375, 0.0185546875, -0.0124969482421875, -0.00939178466796875, 0.0008683204650878906, -0.022491455078125, 0.028106689453125, 0.019287109375, -0.05810546875, -0.0283203125, -0.023590087890625, 0.020965576171...
DKYoon/mt5-xl-lm-adapt
2023-04-14T06:12:46.000Z
[ "transformers", "pytorch", "mt5", "text2text-generation", "arxiv:2205.12647", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
DKYoon
null
null
DKYoon/mt5-xl-lm-adapt
2
660
transformers
2023-04-13T18:51:20
--- license: apache-2.0 --- 🤗 Language model initialized from mT5 and trained for an additional 100K steps on the Prefix LM objective using mC4 data. Paper: [Overcoming Catastrophic Forgetting in Zero-Shot Cross-Lingual Generation](https://arxiv.org/abs/2205.12647) Authors: Tu Vu, Aditya Barua, Brian Lester, Daniel Cer, Mohit Iyyer, Noah Constant PyTorch port of the original Flax checkpoint at [Google/T5X repository](https://github.com/google-research/t5x).
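As a quick usage sketch (assuming the standard `transformers` seq2seq API), the checkpoint can be loaded like any mT5 model. The prompt is illustrative; since the model was trained on the Prefix LM objective, it continues the prefix rather than following instructions, and the XL checkpoint is large, so loading it needs substantial memory.

```python
from transformers import AutoTokenizer, MT5ForConditionalGeneration

# Generation sketch; the prompt is illustrative. Being LM-adapted (Prefix LM),
# the model continues the given prefix rather than following instructions.
tokenizer = AutoTokenizer.from_pretrained("DKYoon/mt5-xl-lm-adapt")
model = MT5ForConditionalGeneration.from_pretrained("DKYoon/mt5-xl-lm-adapt")

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```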
465
[ [ -0.0265045166015625, -0.04937744140625, 0.032379150390625, -0.00008034706115722656, 0.014068603515625, 0.023834228515625, 0.00872802734375, -0.019256591796875, -0.00670623779296875, 0.025848388671875, -0.050445556640625, -0.040771484375, -0.033447265625, -0....
badmonk/kirxra
2023-07-15T06:39:31.000Z
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
badmonk
null
null
badmonk/kirxra
2
660
diffusers
2023-07-08T14:40:58
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- # Model Card for KIRXRA ## Model Description - **Developed by:** BADMONK - **Model type:** Dreambooth Model + Extracted LoRA - **Language(s) (NLP):** EN - **License:** Creativeml-Openrail-M - **Parent Model:** majicMIX realistic v6 # How to Get Started with the Model Use the code below to get started with the model. ### KIRXRA ###
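No snippet is actually included above, so here is a minimal `diffusers` sketch under the assumption that the checkpoint loads with the standard `StableDiffusionPipeline`; "kirxra" is used as the instance token (matching the repository name), and the rest of the prompt is illustrative.

```python
from diffusers import StableDiffusionPipeline
import torch

# Minimal sketch (assumed usage): load the checkpoint with the standard
# StableDiffusionPipeline. "kirxra" as trigger token and the prompt wording
# are assumptions, not confirmed by the model card.
pipe = StableDiffusionPipeline.from_pretrained("badmonk/kirxra", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "photo of kirxra, detailed face, soft lighting"
image = pipe(prompt).images[0]
image.save("kirxra.png")
```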
433
[ [ -0.022216796875, -0.0341796875, 0.0209503173828125, 0.01209259033203125, -0.059661865234375, 0.02203369140625, 0.035125732421875, -0.02203369140625, 0.032318115234375, 0.061065673828125, -0.050506591796875, -0.04559326171875, -0.052398681640625, -0.034301757...
digiplay/OrangeChillMix_v7fix
2023-07-31T10:08:18.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us", "has_space" ]
text-to-image
digiplay
null
null
digiplay/OrangeChillMix_v7fix
0
660
diffusers
2023-07-31T09:52:26
--- license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- Model info : https://civitai.com/models/9486?modelVersionId=129974 Original Author's DEMO images : ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/6af60647-732f-4d14-8a4e-512acc1440f0/width=1024/20247604-1034985627-(masterpiece,%20best%20quality_1),%20(photorealistic_1.2),%20light,%20depth%20of%20field,%20(detailed%20face,%20face%20focus_1),%20game%20cg,%20ultra%20detail.jpeg) ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0946c9b3-e4fd-4a38-aa37-1c44e392b5cc/width=1024/20247628-300149503-best%20quality,%20masterpiece,%20(close-up,face%20focus_0.8),__sunglasses,%20necktie,%20animal%20ears,%20gloves,%201girl,%20black%20hair,%20shirt,%20suit,.jpeg) ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7e31b37b-ab22-4334-9992-3765655a2e45/width=1024/20247621-1274325725-masterpiece,%20best%20quality,%20lens%20flare,%20depth%20of%20field,(backlighting,%20Backlight_1.1),%20grating,raster,(Light%20through%20hair_1.2),_1g.jpeg)
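Since the card only links out to Civitai, here is a minimal `diffusers` sketch, assuming the repository loads with the standard `StableDiffusionPipeline`; the prompt loosely follows the demo captions above and is illustrative.

```python
from diffusers import StableDiffusionPipeline
import torch

# Minimal sketch (assumed usage); the prompt loosely follows the demo captions.
pipe = StableDiffusionPipeline.from_pretrained(
    "digiplay/OrangeChillMix_v7fix", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "masterpiece, best quality, photorealistic, 1girl, depth of field, detailed face",
    negative_prompt="lowres, bad anatomy",
    num_inference_steps=30,
).images[0]
image.save("orangechillmix.png")
```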
1,073
[ [ -0.0390625, -0.027984619140625, 0.030059814453125, 0.02154541015625, -0.02789306640625, -0.004985809326171875, 0.006221771240234375, -0.02264404296875, 0.03375244140625, 0.037567138671875, -0.06109619140625, -0.0428466796875, -0.018096923828125, -0.001491546...
luodian/OTTER-Image-LLaMA7B-LA-InContext
2023-08-07T11:02:35.000Z
[ "transformers", "pytorch", "otter", "license:other", "endpoints_compatible", "region:us" ]
null
luodian
null
null
luodian/OTTER-Image-LLaMA7B-LA-InContext
22
659
transformers
2023-04-27T11:22:33
---
license: other
---

# Please don't use this version for evaluation; it is deprecated.

## 🦦 Simple Code For Otter-9B

Here is an example of multi-modal ICL (in-context learning) with 🦦 Otter. We provide two demo images with corresponding instructions and answers, then we ask the model to generate an answer given our instruction. You may change your instruction and see how the model responds.

Please first clone [Otter](https://github.com/Luodian/Otter) to your local disk. Place the following script inside the Otter folder to make sure it has access to otter/modeling_otter.py.

```python
import mimetypes
import os
from io import BytesIO
from typing import Union

import cv2
import requests
import torch
import transformers
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from tqdm import tqdm
import sys

from otter.modeling_otter import OtterForConditionalGeneration

# Disable warnings
requests.packages.urllib3.disable_warnings()

# ------------------- Utility Functions -------------------


def get_content_type(file_path):
    content_type, _ = mimetypes.guess_type(file_path)
    return content_type


# ------------------- Image and Video Handling Functions -------------------


def get_image(url: str) -> Union[Image.Image, list]:
    if "://" not in url:  # Local file
        content_type = get_content_type(url)
    else:  # Remote URL
        content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")

    if "image" in content_type:
        if "://" not in url:  # Local file
            return Image.open(url)
        else:  # Remote URL
            return Image.open(requests.get(url, stream=True, verify=False).raw)
    else:
        raise ValueError("Invalid content type. Expected image or video.")


# ------------------- OTTER Prompt and Response Functions -------------------


def get_formatted_prompt(prompt: str, in_context_prompts: list = []) -> str:
    in_context_string = ""
    for in_context_prompt, in_context_answer in in_context_prompts:
        in_context_string += f"<image>User: {in_context_prompt} GPT:<answer> {in_context_answer}<|endofchunk|>"
    return f"{in_context_string}<image>User: {prompt} GPT:<answer>"


def get_response(image_list, prompt: str, model=None, image_processor=None, in_context_prompts: list = []) -> str:
    input_data = image_list

    if isinstance(input_data, Image.Image):
        vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    elif isinstance(input_data, list):  # list of video frames
        vision_x = image_processor.preprocess(input_data, return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    else:
        raise ValueError("Invalid input data.
Expected PIL Image or list of video frames.") lang_x = model.text_tokenizer( [ get_formatted_prompt(prompt, in_context_prompts), ], return_tensors="pt", ) bad_words_id = tokenizer(["User:", "GPT1:", "GFT:", "GPT:"], add_special_tokens=False).input_ids generated_text = model.generate( vision_x=vision_x.to(model.device), lang_x=lang_x["input_ids"].to(model.device), attention_mask=lang_x["attention_mask"].to(model.device), max_new_tokens=512, num_beams=3, no_repeat_ngram_size=3, bad_words_ids=bad_words_id, ) parsed_output = ( model.text_tokenizer.decode(generated_text[0]) .split("<answer>")[-1] .lstrip() .rstrip() .split("<|endofchunk|>")[0] .lstrip() .rstrip() .lstrip('"') .rstrip('"') ) return parsed_output # ------------------- Main Function ------------------- if __name__ == "__main__": model = OtterForConditionalGeneration.from_pretrained("luodian/OTTER-9B-LA-InContext", device_map="auto") model.text_tokenizer.padding_side = "left" tokenizer = model.text_tokenizer image_processor = transformers.CLIPImageProcessor() model.eval() while True: urls = [ "https://images.cocodataset.org/train2017/000000339543.jpg", "https://images.cocodataset.org/train2017/000000140285.jpg", ] encoded_frames_list = [] for url in urls: frames = get_image(url) encoded_frames_list.append(frames) in_context_prompts = [] in_context_examples = [ "What does the image describe?::A family is taking picture in front of a snow mountain.", ] for in_context_input in in_context_examples: in_context_prompt, in_context_answer = in_context_input.split("::") in_context_prompts.append((in_context_prompt.strip(), in_context_answer.strip())) # prompts_input = input("Enter the prompts separated by commas (or type 'quit' to exit): ") prompts_input = "What does the image describe?" prompts = [prompt.strip() for prompt in prompts_input.split(",")] for prompt in prompts: print(f"\nPrompt: {prompt}") response = get_response(encoded_frames_list, prompt, model, image_processor, in_context_prompts) print(f"Response: {response}") if prompts_input.lower() == "quit": break ```
5,319
[ [ -0.0367431640625, -0.0546875, 0.0175323486328125, 0.01873779296875, -0.0269622802734375, -0.0091400146484375, -0.00830078125, -0.0247039794921875, -0.00676727294921875, 0.01062774658203125, -0.0587158203125, -0.03271484375, -0.0396728515625, 0.01491546630859...
huspacy/hu_core_news_lg
2023-10-27T17:59:14.000Z
[ "spacy", "token-classification", "hu", "license:cc-by-sa-4.0", "model-index", "region:us" ]
token-classification
huspacy
null
null
huspacy/hu_core_news_lg
3
658
spacy
2022-03-02T23:29:05
--- tags: - spacy - token-classification language: - hu license: cc-by-sa-4.0 model-index: - name: hu_core_news_lg results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8714565876 - name: NER Recall type: recall value: 0.8593530239 - name: NER F Score type: f_score value: 0.8653624856 - task: name: TAG type: token-classification metrics: - name: TAG (XPOS) Accuracy type: accuracy value: 0.9688501842 - task: name: POS type: token-classification metrics: - name: POS (UPOS) Accuracy type: accuracy value: 0.9670319154 - task: name: MORPH type: token-classification metrics: - name: Morph (UFeats) Accuracy type: accuracy value: 0.9362618432 - task: name: LEMMA type: token-classification metrics: - name: Lemma Accuracy type: accuracy value: 0.9759831595 - task: name: UNLABELED_DEPENDENCIES type: token-classification metrics: - name: Unlabeled Attachment Score (UAS) type: f_score value: 0.8332400672 - task: name: LABELED_DEPENDENCIES type: token-classification metrics: - name: Labeled Attachment Score (LAS) type: f_score value: 0.76922216 - task: name: SENTS type: token-classification metrics: - name: Sentences F-Score type: f_score value: 0.9821428571 --- Core Hungarian model for HuSpaCy. Components: tok2vec, senter, tagger, morphologizer, lemmatizer, parser, ner | Feature | Description | | --- | --- | | **Name** | `hu_core_news_lg` | | **Version** | `3.7.0` | | **spaCy** | `>=3.7.0,<3.8.0` | | **Default Pipeline** | `tok2vec`, `senter`, `tagger`, `morphologizer`, `lookup_lemmatizer`, `trainable_lemmatizer`, `parser`, `ner` | | **Components** | `tok2vec`, `senter`, `tagger`, `morphologizer`, `lookup_lemmatizer`, `trainable_lemmatizer`, `parser`, `ner` | | **Vectors** | -1 keys, 200000 unique vectors (300 dimensions) | | **Sources** | [UD Hungarian Szeged](https://universaldependencies.org/treebanks/hu_szeged/index.html) (Richárd Farkas, Katalin Simkó, Zsolt Szántó, Viktor Varga, Veronika Vincze (MTA-SZTE Research Group on Artificial Intelligence))<br>[NYTK-NerKor Corpus](https://github.com/nytud/NYTK-NerKor) (Eszter Simon, Noémi Vadász (Department of Language Technology and Applied Linguistics))<br>[Szeged NER Corpus](https://rgai.inf.u-szeged.hu/node/130) (György Szarvas, Richárd Farkas, László Felföldi, András Kocsor, János Csirik (MTA-SZTE Research Group on Artificial Intelligence))<br>[Hungarian lg Floret vectors](https://huggingface.co/huspacy/hu_vectors_web_lg) (Szeged AI) | | **License** | `cc-by-sa-4.0` | | **Author** | [SzegedAI, MILAB](https://github.com/huspacy/huspacy) | ### Label Scheme <details> <summary>View label scheme (1209 labels for 4 components)</summary> | Component | Labels | | --- | --- | | **`tagger`** | `ADJ`, `ADP`, `ADV`, `AUX`, `CCONJ`, `DET`, `INTJ`, `NOUN`, `NUM`, `PART`, `PRON`, `PROPN`, `PUNCT`, `SCONJ`, `SYM`, `VERB`, `X` | | **`morphologizer`** | `Definite=Def\|POS=DET\|PronType=Art`, `Case=Ine\|Number=Sing\|POS=NOUN`, `POS=ADV`, `Case=Nom\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Nom\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|POS=NOUN`, `Definite=Ind\|POS=DET\|PronType=Tot`, `Case=Ade\|Number=Sing\|POS=NOUN`, `Case=Nom\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `POS=PUNCT`, `Case=Nom\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Ind\|POS=DET\|PronType=Ind`, 
`Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `POS=ADP`, `POS=CCONJ`, `Case=Del\|Number=Sing\|POS=NOUN`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Sbl\|Number=Sing\|POS=NOUN`, `Case=Nom\|Number=Sing\|POS=ADJ\|VerbForm=PartPast`, `Case=Del\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|POS=PROPN`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|POS=NOUN`, `Case=Sup\|Number=Sing\|POS=PROPN`, `Case=Ess\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Plur\|POS=NOUN`, `Degree=Pos\|POS=ADV`, `Case=Sup\|Number=Sing\|POS=NOUN`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Cau\|Number=Plur\|POS=NOUN`, `Case=Cau\|Number=Sing\|POS=NOUN`, `Case=Gen\|Number=Sing\|POS=NOUN`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Tra\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Nom\|Number=Plur\|POS=NOUN`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Definite=Def\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ins\|Number=Sing\|POS=NOUN`, `POS=ADV\|PronType=Neg`, `Case=Ine\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `POS=SCONJ`, `Case=Acc\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Sbl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Abl\|Number=Sing\|POS=NOUN`, `Case=Dat\|Number=Sing\|POS=NOUN`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `POS=VERB\|VerbForm=Inf\|Voice=Act`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Nom\|Degree=Sup\|Number=Sing\|POS=ADJ`, `POS=ADV\|PronType=Dem`, `Case=Ins\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ade\|Degree=Pos\|Number=Sing\|POS=ADJ`, `POS=ADV\|PronType=Int`, `Case=Tra\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Definite=Ind\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sbl\|Number=Sing\|POS=PROPN`, `Case=Sbl\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `POS=PART`, `Case=Sup\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `POS=ADV\|PronType=Tot`, `Case=Ill\|Definite=Ind\|POS=DET\|PronType=Ind`, `Number=Sing\|POS=VERB\|Person=3\|VerbForm=Inf\|Voice=Act`, `Case=Ill\|Number=Sing\|POS=NOUN`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, 
`Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Nom\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `Definite=Ind\|Mood=Cnd\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Sup\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Ade\|Number=Sing\|POS=ADJ\|VerbForm=PartPast`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ess\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Acc\|Number=Sing\|POS=PROPN`, `Case=Nom\|Number=Sing\|POS=ADJ\|VerbForm=PartFut`, `Case=Ine\|NumType=Card\|Number=Sing\|POS=NUM`, `Definite=Ind\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Plur\|POS=NOUN`, `Case=Del\|Number=Plur\|POS=NOUN`, `Case=Gen\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Nom\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Tra\|Number=Sing\|POS=NOUN`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Ind\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Definite=Ind\|POS=DET\|PronType=Art`, `Case=Dat\|Number=Plur\|POS=NOUN`, `Case=Ins\|Number=Plur\|POS=NOUN`, `Case=Sbl\|Number=Plur\|POS=NOUN`, `Case=Ela\|Number=Sing\|POS=NOUN`, `Definite=Ind\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=All\|Number=Sing\|POS=NOUN`, `Case=Ine\|Number=Plur\|POS=NOUN`, `Case=Dat\|Number=Plur\|POS=ADJ\|VerbForm=PartPres`, `Case=Ela\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Abl\|Number=Sing\|POS=PROPN`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Ins\|Number=Sing\|POS=PROPN`, `Case=Ess\|Number=Sing\|POS=ADJ\|VerbForm=PartPast`, `Number=Plur\|POS=VERB\|Person=3\|VerbForm=Inf\|Voice=Act`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=All\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Abl\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Degree=Pos\|Number=Plur\|POS=ADJ`, `POS=ADV\|PronType=Rel`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Cau`, `Case=Del\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Gen\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Ill\|Number=Plur\|POS=NOUN`, `Case=Ela\|Number=Plur\|POS=NOUN`, `Case=Ill\|Number=Sing\|POS=PROPN`, `Case=Ela\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Definite=Def\|POS=DET\|PronType=Ind`, 
`Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ter\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `POS=ADV\|VerbForm=Conv`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sup\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Aspect=Iter\|Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Aspect=Iter\|Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dis\|Number=Sing\|POS=NOUN`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ade\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Dat\|Number=Plur\|POS=ADJ\|VerbForm=PartPast`, `Case=Dat\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Plur\|POS=PROPN`, `Case=Nom\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Cau\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Dat\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Ine\|Number=Sing\|POS=PROPN`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Cau`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Cau`, `Case=Abs\|Number=Sing\|POS=NOUN`, `Case=Ade\|Number=Sing\|POS=PROPN`, `Case=Ins\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Abl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Gen\|Number=Sing\|POS=PROPN`, `Case=Del\|Number=Sing\|POS=PROPN`, `Case=Sbl\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Loc\|Number=Sing\|POS=NOUN`, `Case=Acc\|Definite=Ind\|POS=DET\|PronType=Ind`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Definite=Ind\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Ind\|Mood=Cnd\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ter\|Number=Sing\|POS=NOUN`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `POS=X`, `Definite=Def\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Gen\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Del\|NumType=Card\|Number=Sing\|POS=NUM`, 
`Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Neg`, `Case=Tra\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Degree=Pos\|POS=ADV\|PronType=Dem`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|Reflex=Yes`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Definite=Ind\|Mood=Cnd,Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ade\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Neg`, `Definite=Def\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Aspect=Iter\|Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Ine\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Cnd,Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Sbl\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=All\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ess\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Definite=Ind\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Dat\|Number=Sing\|POS=PROPN`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Nom\|Number=Plur\|POS=ADJ\|VerbForm=PartPres`, `Case=Sbl\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Ess\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Acc\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3\|VerbForm=PartPast`, `Definite=Def\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Definite=Ind\|POS=DET\|PronType=Neg`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ter\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Definite=Def\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Definite=Def\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Nom\|Number=Plur\|POS=ADJ\|VerbForm=PartPast`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Acc\|Number=Plur\|POS=ADJ\|VerbForm=PartPast`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Cau\|Number=Sing\|POS=PROPN`, `Case=Abs\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, 
`Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Ine\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Ess\|Number=Sing\|POS=NOUN`, `Case=Ter\|Number=Plur\|POS=NOUN`, `Case=Tem\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `POS=INTJ`, `Case=Ine\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Number=Plur\|POS=VERB\|Person=1\|VerbForm=Inf\|Voice=Act`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Definite=Ind\|Mood=Pot\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=All\|Number=Sing\|POS=PROPN`, `Case=Ter\|Number=Sing\|POS=PROPN`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sbl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Neg`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Definite=Ind\|Mood=Imp\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Definite=Ind\|Mood=Cnd\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Definite=Ind\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Sbl\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Definite=Def\|POS=DET\|PronType=Prs`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ill\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Del\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Acc\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ill\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Definite=Ind\|Mood=Imp,Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Definite=Def\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, 
`Case=Dat\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Definite=Def\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Definite=Ind\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Cau\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Ins\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Ins\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=2\|PronType=Prs`, `Definite=2\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Case=Sbl\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Ind\|Mood=Pot\|Number=Sing\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=All\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=All\|Number=Plur\|POS=NOUN`, `Case=Ela\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Case=Abs\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ine\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs\|Reflex=Yes`, `Case=Ins\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `POS=AUX\|VerbForm=Inf\|Voice=Act`, `Case=Nom\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Definite=Def\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ela\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Nom\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=All\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Aspect=Iter\|Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Ter\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Gen\|Number=Plur\|POS=NOUN`, `Case=Tem\|Number=Sing\|POS=NOUN`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Acc\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Ade\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Ins\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `POS=ADV\|PronType=Ind`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ill\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Def\|POS=DET\|PronType=Int`, `Case=Gen\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, 
`Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Abs\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Del\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Acc\|Number=Plur\|POS=PROPN`, `Case=Abl\|NumType=Card\|Number=Sing\|POS=NUM`, `Definite=Def\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Abs\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Definite=Def\|Mood=Pot\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Abl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ela\|Number=Sing\|POS=PROPN`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ela\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Sbl\|Number=Sing\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Def\|Mood=Imp,Pot\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Def\|POS=DET\|PronType=Tot`, `Definite=Def\|POS=DET\|PronType=Neg`, `Case=Ins\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Acc\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Sup\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sbl\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Dat\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Gen\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin\|Voice=Cau`, `Case=Sbl\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Tra\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Ess\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ess\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Sup\|Number=Plur\|POS=DET\|Person=3\|PronType=Dem`, `Case=Acc\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Nom\|Degree=Pos\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Nom\|Degree=Pos\|Number=Sing\|POS=ADJ\|VerbForm=PartPast`, `Case=Ess\|Degree=Pos\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Degree=Cmp\|POS=ADV`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=2\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=2\|PronType=Prs`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Case=Ela\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ins\|NumType=Card\|Number=Sing\|POS=NUM`, 
`Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Degree=Pos\|Number=Sing\|POS=ADJ\|VerbForm=PartFut`, `Case=Dat\|Degree=Pos\|Number=Plur\|POS=ADJ\|VerbForm=PartPast`, `Degree=Sup\|POS=ADV`, `Case=Acc\|NumType=Card\|Number=Sing\|POS=NUM`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ine\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ade\|Number=Plur\|POS=NOUN`, `Case=Acc\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Tra\|Degree=Pos\|Number=Sing\|POS=ADJ\|VerbForm=PartPres`, `Case=Nom\|Degree=Pos\|Number=Plur\|POS=ADJ\|VerbForm=PartPres`, `Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Inf\|Voice=Act`, `Case=All\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Cau\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Nom\|Degree=Pos\|Number=Sing\|Number[psed]=Sing\|POS=ADJ`, `Case=Nom\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ela\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Ine\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Mood=Pot\|POS=VERB\|VerbForm=Inf\|Voice=Act`, `Case=Ela\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ade\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Definite=Def\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Inf\|Voice=Act`, `Case=Ela\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Case=Nom\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=Sbl\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Definite=Ind\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin\|Voice=Cau`, `Case=Ade\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Dat\|Degree=Pos\|Number=Plur\|POS=ADJ\|VerbForm=PartPres`, `Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Inf\|Voice=Act`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Number=Plur\|POS=ADV\|Person=1\|PronType=PrsPron`, `POS=ADV\|PronType=v`, `Definite=Ind\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Abl\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Number=Sing\|POS=ADV\|Person=3\|PronType=PrsPron`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Nom\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=Tem\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=Abl\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Number=Sing\|POS=ADV\|Person=1\|PronType=PrsPron`, `Case=Ter\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=Ill\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=2\|PronType=Prs`, `Number=Sing\|POS=VERB\|Person=1\|VerbForm=Inf\|Voice=Act`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Ine\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Number=Plur\|POS=ADV\|Person=3\|PronType=PrsPron`, `Case=Ins\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Ela\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, 
`Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ter\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Sbl\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Cas=6\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ela\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Sup\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sup\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Del\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Nom\|NumType=Dist\|Number=Sing\|POS=NUM`, `Case=Sup\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Abl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Definite=Ind\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Sbl\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Pres\|Voice=Act`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Pres\|Voice=Act`, `Case=Acc\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Number=Plur\|POS=ADV\|Person=2\|PronType=PrsPron`, `Case=Ine\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Pres\|Voice=Act`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=All\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Dat\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Definite=Ind\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sup\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Gen\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Case=Nom\|Degree=Sup\|Number=Plur\|POS=ADJ`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Del\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Nom\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=All\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, 
`Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=All\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Abl\|Number=Plur\|POS=NOUN`, `Case=Dat\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ade\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Cas=6\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=2\|Poss=Yes\|PronType=Prs`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ess\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Sbl\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=All\|Number=Plur\|POS=PROPN`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Ind`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Acc\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ine\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Ade\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=2\|PronType=Prs`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=VERB\|Person=2\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Ins\|Number=Plur\|POS=PROPN`, `Case=Nom\|NumType=Ord\|Number=Plur\|POS=ADJ`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Definite=Def\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=All\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Nom\|NumType[sem]=Dot\|Number=Sing\|POS=NUM`, `Case=Sup\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Degree=Pos\|POS=ADV\|PronType=Ind`, `Case=Ela\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Pres\|Voice=Act`, `Case=Ade\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Sup\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=Gen\|Number=Plur\|POS=PROPN`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Ins\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Ill\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Del\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Number=Sing\|POS=ADV\|Person=2\|PronType=PrsPron`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Gen\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Ill\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Degree=Cmp\|POS=ADV\|PronType=Dem`, `Case=Ins\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ins\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ill\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dat\|NumType=Card\|Number=Sing\|POS=NUM`, 
`Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Del\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=1\|PronType=Tot`, `Case=Gen\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Nom\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=1`, `Case=Sbl\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Tem\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Tem\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|NumType[sem]=Result\|Number=Sing\|POS=NUM`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Acc\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Dat\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Tot`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ela\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=2\|PronType=Prs`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Del\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Del\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Dat\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Definite=2\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=Com\|Number=Sing\|POS=NOUN`, `Case=Tra\|Number=Plur\|POS=NOUN`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=1\|PronType=Tot`, `Case=Ade\|Number=Plur\|POS=PROPN`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=2\|PronType=Prs`, `Case=Ess\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dat\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Definite=Def\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Sbl\|NumType[sem]=Quotient\|Number=Sing\|POS=NUM`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Del\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, 
`Case=Del\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ade\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=Ess\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Del\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Tem\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Ill\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Ill\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Nom\|Degree=Pos\|Number=Sing\|Number[psor]=Plur\|POS=ADJ\|Person[psor]=1`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Del\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Gen\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Gen\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Dat\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Gen\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ins\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Dat\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Acc\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Definite=Def\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Sbl\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Sup\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Tem\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Tra\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Abs\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=All\|Number=Plur\|POS=PRON\|Person=1\|PronType=Tot`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Ind`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Sup\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Past\|VerbForm=Fin\|Voice=Act`, `Case=All\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Cas=1\|Number=Sing\|POS=PROPN`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=2\|Tense=Pres\|Voice=Act`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Sbl\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Sbl\|NumType[sem]=Result\|Number=Sing\|POS=NUM`, `Case=Nom\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Sbl\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, 
`Case=Nom\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Ind\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Past\|Voice=Act`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Ela\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Gen\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Dat\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=1\|PronType=Tot`, `Definite=Ind\|Mood=Imp\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Cau\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Tot`, `Case=Abl\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Tra\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Cau\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Sup\|Number=Plur\|POS=PROPN`, `Case=Ess\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Definite=Def\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Dis\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Ill\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Nom\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Nom\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Cas=6\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Sup\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Sbl\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Sup\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Abl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Gen\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Abs\|Number=Plur\|POS=NOUN`, `Case=Sup\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Tot`, `Case=Ine\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Tra\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ins\|Degree=Cmp\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sbl\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=All\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Abl\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PROPN\|Person[psor]=1`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Del\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Del\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ade\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, 
`Case=Ter\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Dat\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ins\|Number=Plur\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Sup\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Sbl\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Definite=Ind\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Ill\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Acc\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Gen\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=All\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Ill\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Definite=2\|Mood=Cnd\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Cas=6\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Abl\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Abs\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|PronType=Dem`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Acc\|Degree=Sup\|Number=Plur\|POS=ADJ`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Case=Ade\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ill\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Del\|Degree=Sup\|Number=Plur\|POS=ADJ`, `Case=Nom\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Number=Sing\|POS=VERB\|Person=2\|VerbForm=Inf\|Voice=Act`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Acc\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Cau\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Case=Ine\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Ela\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Sup\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Cau\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Sbl\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Ter\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Tra\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Definite=Ind\|Mood=Cnd\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Dat\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=Dat\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, 
`Case=Abl\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Tem\|Number=Plur\|POS=NOUN`, `Case=Abs\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ins\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=All\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Acc\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=All\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Plur\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Ade\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Ade\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=2\|PronType=Tot`, `Case=Abl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Cau\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Del\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ill\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Ade\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Ade\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Ins\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=All\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Ess\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Cau\|Degree=Sup\|Number=Plur\|POS=ADJ`, `Cas=6\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Tra\|NumType=Card\|Number=Sing\|POS=NUM`, `Number=Plur\|POS=VERB\|Person=2\|VerbForm=Inf\|Voice=Act`, `Case=Nom\|Degree=Pos\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Cas=6\|Number=Sing\|POS=NOUN`, `Case=Ins\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ela\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Sup\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Abl\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Int`, `Case=Ill\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Del\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Tra\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Sbl\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Acc\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Int`, `Case=Nom\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ine\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Abl\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Gen\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, 
`Case=Gen\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Ins\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=All\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Acc\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Tra\|Degree=Sup\|Number=Plur\|POS=ADJ`, `Case=Sbl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ins\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|PronType=Dem`, `Case=Nom\|Degree=Cmp\|Number=Plur\|Number[psed]=Sing\|POS=ADJ`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Cau\|Number=Plur\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=2\|PronType=Ind`, `Case=All\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=All\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Case=Tem\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Case=Ill\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Case=Abl\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Case=Cau\|Number=Plur\|POS=PROPN`, `Case=Ade\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ade\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Del\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Definite=Ind\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=2\|Tense=Pres\|Voice=Act`, `Definite=Def\|Mood=Ind\|Number=Plur\|POS=AUX\|Person=2\|Tense=Pres\|Voice=Act`, `Case=Sup\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Tra\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ins\|Number=Sing\|POS=PRON\|Person=2\|PronType=Prs`, `Case=Sup\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Nom\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Sbl\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Nom\|Number=Plur\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=All\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ill\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ela\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ela\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Del\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|PronType=Int`, `Case=Tra\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ter\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=All\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, 
`Case=Ter\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=All\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Definite=Def\|Mood=Ind\|Number=Sing\|POS=AUX\|Person=2\|Tense=Pres\|Voice=Act`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Cau\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Ins\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Del\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Tem\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Del\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Sup\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ter\|Number=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sup\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Abs\|Number=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=All\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Sup\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Cau\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Sup\|NumType=Ord\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Sup\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Abl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Nom\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Int`, `Case=Ela\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Dat\|NumType=Ord\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Ill\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=All\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=1`, `Case=Nom\|Number=Plur\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=1`, `Case=Ine\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Abl\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=1`, `Case=Ela\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Ade\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Ill\|Number=Sing\|Number[psor]=Plur\|POS=PROPN\|Person[psor]=1`, `Case=Ade\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=All\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Acc\|Degree=Pos\|Number=Plur\|Number[psor]=Plur\|POS=ADJ\|Person[psor]=3`, `Case=Dat\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Nom\|Number=Sing\|POS=SYM\|Type=w`, `Case=Gen\|Number=Sing\|POS=SYM\|Type=w`, `Case=Abl\|Number=Sing\|POS=SYM\|Type=w`, `Case=Acc\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ade\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=All\|Number=Sing\|POS=SYM\|Type=w`, `Case=Tra\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Ins\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Abl\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Degree=Pos\|Number=Plur\|Number[psed]=Sing\|POS=ADJ`, `Case=Sup\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Sup\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Nom\|NumType[sem]=Quotient\|Number=Sing\|POS=NUM`, `Case=Gen\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=PROPN\|Person[psor]=1`, `Case=Ins\|Number=Sing\|Number[psed]=Plur\|POS=NOUN`, `Case=Gen\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, 
`Case=Ine\|Degree=Pos\|Number=Sing\|Number[psor]=Plur\|POS=ADJ\|Person[psor]=3`, `Case=Abs\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ela\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Dat\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Gen\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Number=Sing\|POS=SYM\|Type=o`, `Case=Gen\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Sup\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Nom\|NumType[sem]=Signed\|Number=Sing\|POS=NUM`, `Case=Com\|Number=Sing\|POS=PROPN`, `Case=Acc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Ins\|Number=Sing\|Number[psed]=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Ill\|Number=Plur\|POS=PRON\|Person=1\|Reflexive=Yes`, `Case=Nom\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ins\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Gen\|NumType=Dist\|Number=Sing\|POS=NUM`, `Case=Nom\|NumType[sem]=Formula\|Number=Sing\|POS=NUM`, `Case=Del\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ade\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Ins\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Rel`, `Case=Ine\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Degree=Pos\|Number=Sing\|Number[psor]=Plur\|POS=ADJ\|Person[psor]=3`, `Case=Ade\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Acc\|Number=Sing\|POS=SYM\|Type=o`, `Case=Ins\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ela\|Number=Sing\|POS=SYM\|Type=o`, `Case=Dat\|Degree=Pos\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=All\|Number=Plur\|Number[psed]=Sing\|POS=SYM\|Type=w`, `Case=Ade\|Number=Sing\|POS=SYM\|Type=w`, `Case=Sbl\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ade\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ill\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Acc\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=Ill\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Acc\|Degree=Sup\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Sup\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Dat\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Ill\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=3`, `Case=Sup\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ine\|Degree=Pos\|Number=Plur\|POS=ADJ`, `Case=Gen\|Degree=Pos\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Ins\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Ela\|Number=Sing\|POS=SYM\|Type=w`, `Case=Sbl\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Nom\|Number=Sing\|POS=SYM\|Type=p`, `Case=Abl\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|NumType[sem]=Measure\|Number=Sing\|POS=NUM`, `Case=Abs\|Number=Sing\|POS=PROPN`, `Case=Ins\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Nom\|Number=Sing\|Number[psed]=Plur\|POS=NOUN`, `Case=Nom\|Number=Sing\|POS=SYM\|Type=m`, `Case=Acc\|Number=Sing\|POS=SYM\|Type=m`, 
`Case=Sup\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Ine\|Number=Plur\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=SYM\|Type=o`, `Case=Ins\|Number=Sing\|POS=SYM\|Type=o`, `Case=Ins\|Number=Sing\|POS=SYM\|Type=w`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Acc\|Number=Sing\|Number[psed]=Plur\|POS=NOUN`, `Case=Gen\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Gen\|Number=Plur\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Sbl\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Abl\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Acc\|Degree=Pos\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Abs\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Ill\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ela\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Abl\|NumType[sem]=Time\|Number=Sing\|POS=NUM`, `Case=Gen\|Degree=Sup\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Abs\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Sup\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Gen\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Gen\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Ins\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Sup\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Abs\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Acc\|Degree=Pos\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Acc\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ter\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Dat\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Acc\|NumType[sem]=Percent\|Number=Sing\|POS=NUM`, `Case=Ter\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ade\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ins\|NumType[sem]=Percent\|Number=Sing\|POS=NUM`, `Case=Ins\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Gen\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Dat\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Sbl\|NumType[sem]=Percent\|Number=Sing\|POS=NUM`, `Case=Ine\|NumType[sem]=Percent\|Number=Sing\|POS=NUM`, `Case=All\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ade\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Nom\|NumType[sem]=Percent\|Number=Sing\|POS=NUM`, `Case=All\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Abl\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ter\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Acc\|NumType=Card\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ter\|NumType[sem]=Formula\|Number=Sing\|POS=NUM`, `Case=Sbl\|NumType[sem]=Percent\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=All\|Number=Plur\|POS=PRON\|Person=3\|PronType=Int`, `Case=Nom\|Number=Plur\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Del\|NumType=Frac\|Number=Sing\|POS=NUM`, 
`Case=Cau\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Gen\|Number=Plur\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Ins\|NumType=Ord\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Ade\|NumType=Frac\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Ine\|NumType=Frac\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Ind`, `Case=Sup\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Tra\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Ine\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=Gen\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Tra\|Number=Sing\|Number[psed]=Sing\|POS=NOUN`, `Case=Gen\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Gen\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Ind`, `Case=Tem\|Degree=Sup\|Number=Sing\|POS=ADJ`, `Case=Dat\|NumType[sem]=Dot\|Number=Sing\|POS=NUM`, `Case=Sbl\|NumType=Card\|Number=Plur\|POS=NUM`, `Case=All\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ine\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=All\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Sbl\|Number=Plur\|POS=PROPN`, `Case=Tra\|Number=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Sup\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Dat\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Dat\|Number=Sing\|POS=SYM\|Type=w`, `Case=Ill\|Number=Plur\|POS=PROPN`, `Case=Loc\|Number=Sing\|POS=PROPN`, `Case=Ess\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Acc\|Degree=Pos\|Number=Plur\|Number[psed]=Sing\|POS=ADJ`, `Case=Abl\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=All\|NumType=Frac\|Number=Sing\|POS=NUM`, `Case=Ade\|Degree=Cmp\|Number=Plur\|POS=ADJ`, `Case=Ine\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Ine\|Number=Sing\|POS=SYM\|Type=w`, `Case=Cau\|NumType=Frac\|Number=Sing\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Ela\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Abs\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rel`, `Case=Sbl\|NumType[sem]=Dot\|Number=Sing\|POS=NUM`, `Case=Tem\|Number=Sing\|POS=PROPN`, `Case=Del\|NumType[sem]=Dot\|Number=Sing\|POS=NUM`, `Case=Ade\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Ine\|Number=Sing\|Number[psor]=Plur\|POS=PROPN\|Person[psor]=1`, `Case=Nom\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Tot`, `Case=Acc\|Degree=Sup\|Number=Plur\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Ade\|Number=Plur\|Number[psed]=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=3`, `Case=Ela\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Acc\|Number=Plur\|Number[psed]=Plur\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Prs`, `Case=Del\|Number=Plur\|Number[psed]=Sing\|POS=NOUN`, `Case=Nom\|Degree=Sup\|Number=Sing\|Number[psor]=Sing\|POS=ADJ\|Person[psor]=3`, `Case=Dat\|Number=Plur\|POS=PROPN`, `Case=Ill\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Case=Sbl\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Ins\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=1\|Poss=Yes\|PronType=Prs`, `Case=Dat\|Number=Plur\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ter\|Number=Plur\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, 
`Case=Ess\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Sup\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Acc\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=2\|PronType=Tot`, `Case=Gen\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Ine\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Int`, `Case=All\|Number=Sing\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=2`, `Case=Gen\|Number=Sing\|POS=PRON\|Person=3\|PronType=Rcp`, `Definite=Ind\|Mood=Imp\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|Voice=Act`, `Case=Tra\|Number=Sing\|Number[psor]=Plur\|POS=NOUN\|Person[psor]=1`, `Case=Ins\|NumType=Card\|Number=Plur\|Number[psor]=Sing\|POS=NUM\|Person[psor]=3`, `Case=Del\|Number=Sing\|POS=PRON\|Person=2\|Reflexive=Yes`, `Case=Sbl\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=1`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=2\|PronType=Ind`, `Case=All\|Number=Sing\|POS=PRON\|Person=2\|Poss=Yes\|PronType=Prs`, `Case=Sbl\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Ill\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Ine\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=3`, `Case=Del\|Number=Sing\|POS=PRON\|Person=2\|PronType=Prs`, `Case=Acc\|Number=Sing\|Number[psed]=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=2\|PronType=Tot`, `Case=Abl\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Int`, `Case=Ine\|Number=Sing\|Number[psed]=Sing\|POS=PROPN`, `Case=Cau\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Del\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Cau\|Number=Sing\|POS=PRON\|Person=3\|Reflexive=Yes`, `Case=Nom\|NumType=Card\|Number=Sing\|Number[psor]=Plur\|POS=NUM\|Person[psor]=2`, `Case=Abl\|Number=Sing\|Number[psor]=Sing\|POS=NOUN\|Person[psor]=2`, `Case=Ine\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Definite=2\|Mood=Imp\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin\|Voice=Act`, `Case=Dat\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Ela\|Degree=Cmp\|Number=Sing\|POS=ADJ`, `Case=Acc\|Number=Sing\|POS=SYM\|Type=p`, `Case=Abl\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Case=Acc\|Number=Plur\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Dem`, `Case=Ine\|Number=Plur\|POS=PROPN`, `Case=Sbl\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=3\|Person[psor]=3\|PronType=Tot`, `Case=Ins\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=3\|Person[psor]=3\|Poss=Yes\|PronType=Prs`, `Case=Ter\|Degree=Pos\|Number=Sing\|POS=ADJ`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Case=Acc\|Number=Plur\|Number[psor]=Sing\|POS=PROPN\|Person[psor]=3`, `Case=All\|Number=Sing\|Number[psed]=Sing\|POS=PRON\|Person=3\|PronType=Tot` | | **`parser`** | `ROOT`, `acl`, `advcl`, `advmod`, `advmod:locy`, `advmod:mode`, `advmod:que`, `advmod:tfrom`, `advmod:tlocy`, `advmod:to`, `advmod:tto`, `amod:att`, `appos`, `aux`, `case`, `cc`, `ccomp`, `ccomp:obj`, `ccomp:obl`, `ccomp:pred`, `compound`, `compound:preverb`, `conj`, `cop`, `csubj`, `dep`, `det`, `flat:name`, `iobj`, `list`, `mark`, `nmod`, `nmod:att`, `nmod:obl`, `nsubj`, `nummod`, `obj`, `obj:lvc`, `obl`, `orphan`, `parataxis`, `punct`, `xcomp` | | **`ner`** | `LOC`, `MISC`, `ORG`, `PER` | </details> ### Accuracy | Type | Score | | --- | --- | | `TOKEN_ACC` | 99.99 | 
| `TOKEN_P` | 99.86 | | `TOKEN_R` | 99.93 | | `TOKEN_F` | 99.89 | | `SENTS_P` | 98.43 | | `SENTS_R` | 98.00 | | `SENTS_F` | 98.21 | | `TAG_ACC` | 96.89 | | `POS_ACC` | 96.70 | | `MORPH_ACC` | 93.63 | | `MORPH_MICRO_P` | 97.03 | | `MORPH_MICRO_R` | 95.98 | | `MORPH_MICRO_F` | 96.50 | | `LEMMA_ACC` | 97.60 | | `DEP_UAS` | 83.32 | | `DEP_LAS` | 76.92 | | `ENTS_P` | 87.15 | | `ENTS_R` | 85.94 | | `ENTS_F` | 86.54 |
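For reference, a minimal usage sketch for a pipeline with this label scheme, assuming it is installed as a spaCy package (the package name below is a placeholder, not the actual distribution name):

```python
import spacy

# hypothetical package name; replace with the actual pipeline package
nlp = spacy.load("hu_pipeline_placeholder")
doc = nlp("Budapest Magyarország fővárosa.")

for token in doc:
    # fine-grained tag, POS, morphological features, dependency label, lemma
    print(token.text, token.tag_, token.pos_, token.morph, token.dep_, token.lemma_)

for ent in doc.ents:
    # entities follow the LOC / MISC / ORG / PER scheme listed above
    print(ent.text, ent.label_)
```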
78,221
[ [ -0.02978515625, -0.03106689453125, 0.02825927734375, 0.03814697265625, -0.01837158203125, -0.0127410888671875, -0.02313232421875, -0.0017147064208984375, 0.049560546875, 0.031890869140625, -0.0430908203125, -0.0662841796875, -0.0268096923828125, 0.0346374511...
patrickvonplaten/wav2vec2_tiny_random_robust
2021-09-01T14:48:17.000Z
[ "transformers", "pytorch", "wav2vec2", "feature-extraction", "automatic-speech-recognition", "en", "dataset:librispeech_asr", "license:apache-2.0", "endpoints_compatible", "region:us" ]
feature-extraction
patrickvonplaten
null
null
patrickvonplaten/wav2vec2_tiny_random_robust
0
658
transformers
2022-03-02T23:29:05
--- language: en datasets: - librispeech_asr tags: - automatic-speech-recognition license: apache-2.0 --- ## Test model To test this model, run the following code: ```python from datasets import load_dataset from transformers import Wav2Vec2ForCTC import torchaudio import torch ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2_tiny_random_robust") def load_audio(batch): batch["samples"], _ = torchaudio.load(batch["file"]) return batch ds = ds.map(load_audio) input_values = torch.nn.utils.rnn.pad_sequence([torch.tensor(x[0]) for x in ds["samples"][:10]], batch_first=True) # forward logits = model(input_values).logits pred_ids = torch.argmax(logits, dim=-1) # dummy loss dummy_labels = pred_ids.clone() dummy_labels[dummy_labels == model.config.pad_token_id] = 1 # can't have CTC blank token in label dummy_labels = dummy_labels[:, -(dummy_labels.shape[1] // 4):] # make sure labels are shorter to avoid "inf" loss (can still happen though...) loss = model(input_values, labels=dummy_labels).loss ```
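To also inspect the (random) greedy transcriptions, the predicted ids can be decoded with the checkpoint's tokenizer. A minimal sketch, assuming the repository ships matching `Wav2Vec2Processor` files; since the weights are randomly initialized, the decoded text is meaningless:

```python
from transformers import Wav2Vec2Processor

# assumption: the checkpoint includes processor/tokenizer files
processor = Wav2Vec2Processor.from_pretrained("patrickvonplaten/wav2vec2_tiny_random_robust")

# greedy CTC decoding of the argmax ids computed above
print(processor.batch_decode(pred_ids))
```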
1,134
[ [ -0.0219573974609375, -0.04205322265625, 0.01480865478515625, 0.025360107421875, -0.017486572265625, -0.01232147216796875, -0.0137939453125, -0.0074462890625, -0.01364898681640625, 0.033172607421875, -0.05181884765625, -0.031219482421875, -0.050140380859375, ...
stanfordnlp/SteamSHP-flan-t5-xl
2023-10-10T23:56:13.000Z
[ "transformers", "pytorch", "t5", "text2text-generation", "human feedback", "rlhf", "preferences", "reddit", "preference model", "RL", "NLG", "evaluation", "en", "dataset:stanfordnlp/SHP", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inferen...
text2text-generation
stanfordnlp
null
null
stanfordnlp/SteamSHP-flan-t5-xl
40
658
transformers
2023-02-20T04:46:43
--- license: apache-2.0 datasets: - stanfordnlp/SHP language: - en metrics: - accuracy tags: - human feedback - rlhf - preferences - reddit - preference model - RL - NLG - evaluation --- # 💨🚢 SteamSHP-XL <!-- Provide a quick summary of what the model is/does. --> **If you mention this model, please cite the paper:** [Understanding Dataset Difficulty with V-Usable Information (ICML 2022)](https://proceedings.mlr.press/v162/ethayarajh22a.html). SteamSHP-XL is a preference model trained to predict -- given some context and two possible responses -- which response humans will find more helpful. It can be used for NLG evaluation or as a reward model for RLHF. It is a FLAN-T5-xl model (3B parameters) finetuned on: 1. The [Stanford Human Preferences Dataset (SHP)](https://huggingface.co/datasets/stanfordnlp/SHP), which contains collective human preferences sourced from 18 different communities on Reddit (e.g., `askculinary`, `legaladvice`, etc.). 2. The helpfulness data in [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset. There is a smaller variant called [SteamSHP-Large](https://huggingface.co/kawine/SteamSHP-flan-t5-large) that was made by finetuning FLAN-T5-large (780M parameters). Despite being 1/4 of the size, it is on average only 0.75 points less accurate on the SHP + Anthropic test data (across all domains). ## Usage ### Normal Usage The input text should be of the format: ``` POST: { the context, such as the 'history' column in SHP (not containing any newlines \n) } RESPONSE A: { first possible continuation (not containing any newlines \n) } RESPONSE B: { second possible continuation (not containing any newlines \n) } Which response is better? RESPONSE ``` The output generated by SteamSHP-XL will be either `A` or `B`. Here's how to use the model: ```python >> from transformers import T5ForConditionalGeneration, T5Tokenizer >> device = 'cuda' # if you have a GPU >> tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-xl') >> model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-xl').to(device) >> input_text = "POST: Instacart gave me 50 pounds of limes instead of 5 pounds... what the hell do I do with 50 pounds of limes? I've already donated a bunch and gave a bunch away. I'm planning on making a bunch of lime-themed cocktails, but... jeez. Ceviche? \n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE" >> x = tokenizer([input_text], return_tensors='pt').input_ids.to(device) >> y = model.generate(x, max_new_tokens=1) >> tokenizer.batch_decode(y, skip_special_tokens=True) ['A'] ``` If the input exceeds the 512-token limit, you can use [pySBD](https://github.com/nipunsadvilkar/pySBD) to break the input up into sentences and only include what fits into 512 tokens. When trying to cram an example into 512 tokens, we recommend truncating the context as much as possible and leaving the responses as untouched as possible. ### Reward Model Usage If you want to use SteamSHP-XL as a reward model -- to get a score for a single response -- then you need to structure the input such that RESPONSE A is what you want to score and RESPONSE B is just an empty input: ``` POST: { the context, such as the 'history' column in SHP (not containing any newlines \n) } RESPONSE A: { continuation (not containing any newlines \n) } RESPONSE B: . Which response is better? RESPONSE ``` Then calculate the probability assigned to the label A.
This probability (or the logit, depending on what you want) is the score for the response: ```python >> input_text = "POST: Instacart gave me 50 pounds of limes instead of 5 pounds... what the hell do I do with 50 pounds of limes? I've already donated a bunch and gave a bunch away. I'm planning on making a bunch of lime-themed cocktails, but... jeez. Ceviche? \n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: .\n\n Which response is better? RESPONSE" >> x = tokenizer([input_text], return_tensors='pt').input_ids.to(device) >> outputs = model.generate(x, return_dict_in_generate=True, output_scores=True, max_new_tokens=1) >> torch.exp(outputs.scores[0][:, 71]) / torch.exp(outputs.scores[0][:,:]).sum(axis=1).item() # index 71 corresponds to the token for 'A' 0.819 ``` The probability will almost always be high (in the range of 0.8 to 1.0), since RESPONSE B is just a null input. Therefore you may want to normalize the probability. You can also compare the two probabilities assigned independently to each response (given the same context) to infer the preference label. For example, if one response has probability 0.95 and the other has 0.80, the former will be preferred. Inferring the preference label in this way only leads to a 0.006 drop in accuracy on the SHP + HH-RLHF test data on average across all domains, meaning that there's only a very small penalty for using SteamSHP-XL as a reward model instead of as a preference model. ## Training and Evaluation SteamSHP-XL was only finetuned on 125K of the 392K training examples that were available, since we found that: 1. When the total input length exceeded the limit (512 tokens), the loss would not converge. When possible, we crammed an example to fit under 500 tokens by truncating the context as much as possible, though some examples would still not fit despite this. We used 500 as the limit instead of 512 to allow for slight modifications to the structure of the input without any examples exceeding the actual 512 limit. 2. Training on fewer preferences with a stronger signal led to better performance than training on all the preferences. From the SHP dataset, we only used preferences where the more preferred comment was twice as preferred as the other (i.e., `score_ratio` >= 2) and used no more than 5 preferences from each context (i.e., 5 examples per unique `post_id`) to prevent overfitting. We did no such subsampling for the HH-RLHF training data. We evaluated the model on the SHP and HH-RLHF test data using accuracy, but only on the data that could be truncated to fit within 500 tokens (a total of 18621 out of 20753 available test examples). SteamSHP-XL gets an average 72.8% accuracy across all domains: | Domain | Accuracy | | ------ | -------- | | askculinary | 0.7199 | | askhr | 0.7743 | | askdocs | 0.7210 | | askanthropology | 0.7594 | | asksciencefiction | 0.7283 | | askacademia | 0.7442 | | askengineers | 0.7183 | | legaladvice | 0.8068 | | explainlikeimfive | 0.7392 | | askbaking | 0.6741 | | askphysics | 0.8000 | | askscience | 0.7114 | | askphilosophy | 0.6907 | | askvet | 0.7742 | | changemyview | 0.7043 | | askcarguys | 0.7568 | | askhistorians | 0.7476 | | asksocialscience | 0.7308 | | anthropic (helpfulness) | 0.7310 | | ALL (unweighted) | 0.7278 | As mentioned previously, if you use SteamSHP as a reward model and try to infer the preference label based on the probability assigned to each response independently, that could also work!
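For illustration, a minimal sketch of this independent-scoring approach, reusing the `tokenizer`, `model`, and `device` from above; `score_response` and the `post`/`response_*` variables are hypothetical placeholders, not part of the model's API:

```python
import torch

def score_response(post, response):
    # hypothetical helper: score one response against a null RESPONSE B
    input_text = f"POST: {post} \n\n RESPONSE A: {response}\n\n RESPONSE B: .\n\n Which response is better? RESPONSE"
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    outputs = model.generate(x, return_dict_in_generate=True, output_scores=True, max_new_tokens=1)
    # probability mass assigned to the token for 'A' (index 71)
    return (torch.exp(outputs.scores[0][:, 71]) / torch.exp(outputs.scores[0][:, :]).sum(axis=1)).item()

preferred = 'A' if score_response(post, response_a) > score_response(post, response_b) else 'B'
```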
But doing so will lead to a 0.006 drop in accuracy on the test data (on average across all domains), meaning that there is a small penalty. ## Biases and Limitations SteamSHP is trained to predict which of two responses humans will find *more helpful*, not which response is *less harmful*. It should not be used to detect toxicity, make ethical judgments, or for a similar purpose. Biases and misinformation in the datasets used to train SteamSHP may also be propagated downstream to the model predictions. Although SHP filtered out posts with NSFW (over 18) content and chose subreddits that were well-moderated and had policies against harassment and bigotry, some of the data may contain discriminatory or harmful language. The responses that humans collectively found more helpful are also not guaranteed to be more factual. The people whose preferences are captured in SHP and HH-RLHF are not representative of the broader population. Although specific demographic information is not available, overall, the Reddit users whose preferences are captured in SHP are disproportionately male and from developed, Western, and English-speaking countries (Pew Research). [Past work](https://www.anthropic.com/model-written-evals.pdf) by Anthropic has found that models optimized for human preference can be obsequious, at the expense of the truth. ## Contact Please contact kawin@stanford.edu if you have any questions about the model. This model was created by Kawin Ethayarajh, Heidi (Chenyu) Zhang, Yizhong Wang, and Dan Jurafsky. ## Citation SHP was created using the techniques proposed in the following paper. Please cite this work if you use SHP or the SteamSHP models: ``` @InProceedings{pmlr-v162-ethayarajh22a, title = {Understanding Dataset Difficulty with $\mathcal{V}$-Usable Information}, author = {Ethayarajh, Kawin and Choi, Yejin and Swayamdipta, Swabha}, booktitle = {Proceedings of the 39th International Conference on Machine Learning}, pages = {5988--6008}, year = {2022}, editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan}, volume = {162}, series = {Proceedings of Machine Learning Research}, month = {17--23 Jul}, publisher = {PMLR}, } ```
9,371
[ [ -0.016021728515625, -0.05419921875, 0.0201263427734375, 0.0028209686279296875, -0.01885986328125, -0.019989013671875, -0.0144805908203125, -0.036895751953125, 0.02850341796875, 0.028472900390625, -0.04248046875, -0.035736083984375, -0.0458984375, -0.00746536...
cvssp/audioldm-l-full
2023-08-29T14:41:45.000Z
[ "diffusers", "arxiv:2301.12503", "diffusers:AudioLDMPipeline", "region:us" ]
null
cvssp
null
null
cvssp/audioldm-l-full
11
658
diffusers
2023-04-04T08:47:26
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards {} --- # AudioLDM AudioLDM is a latent text-to-audio diffusion model capable of generating realistic audio samples given any text input. It is available in the 🧨 Diffusers library from v0.15.0 onwards. # Model Details AudioLDM was proposed in the paper [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https://arxiv.org/abs/2301.12503) by Haohe Liu et al. Inspired by [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion-v1-4), AudioLDM is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/laion/clap-htsat-unfused) latents. AudioLDM takes a text prompt as input and predicts the corresponding audio. It can generate text-conditional sound effects, human speech and music. # Checkpoint Details This is the **large** version of the AudioLDM model, with twice the number of UNet channels and head channels as the small checkpoints. The four AudioLDM checkpoints are summarised in the table below: **Table 1:** Summary of the AudioLDM checkpoints. | Checkpoint | Training Steps | Audio conditioning | CLAP audio dim | UNet dim | Params | |-----------------------------------------------------------------------|----------------|--------------------|----------------|----------|--------| | [audioldm-s-full](https://huggingface.co/cvssp/audioldm) | 1.5M | No | 768 | 128 | 421M | | [audioldm-s-full-v2](https://huggingface.co/cvssp/audioldm-s-full-v2) | > 1.5M | No | 768 | 128 | 421M | | [audioldm-m-full](https://huggingface.co/cvssp/audioldm-m-full) | 1.5M | Yes | 1024 | 192 | 652M | | [audioldm-l-full](https://huggingface.co/cvssp/audioldm-l-full) | 1.5M | No | 768 | 256 | 975M | ## Model Sources - [**Original Repository**](https://github.com/haoheliu/AudioLDM) - [**🧨 Diffusers Pipeline**](https://huggingface.co/docs/diffusers/api/pipelines/audioldm) - [**Paper**](https://arxiv.org/abs/2301.12503) - [**Demo**](https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation) # Usage First, install the required packages: ``` pip install --upgrade diffusers transformers accelerate ``` ## Text-to-Audio For text-to-audio generation, the [AudioLDMPipeline](https://huggingface.co/docs/diffusers/api/pipelines/audioldm) can be used to load pre-trained weights and generate text-conditional audio outputs: ```python from diffusers import AudioLDMPipeline import torch repo_id = "cvssp/audioldm-l-full" pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] ``` The resulting audio output can be saved as a .wav file: ```python import scipy scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(audio, rate=16000) ``` <audio controls> <source src="https://huggingface.co/datasets/sanchit-gandhi/audioldm-readme-samples/resolve/main/audioldm-l-full-techno.wav" type="audio/wav"> Your browser does not support the audio element. </audio> ## Tips Prompts: * Descriptive prompt inputs work best: you can use adjectives to describe the sound (e.g. 
"high quality" or "clear") and make the prompt context specific (e.g., "water stream in a forest" instead of "stream"). * It's best to use general terms like 'cat' or 'dog' instead of specific names or abstract objects that the model may not be familiar with. Inference: * The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument: higher steps give higher quality audio at the expense of slower inference. * The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument. # Citation **BibTeX:** ``` @article{liu2023audioldm, title={AudioLDM: Text-to-Audio Generation with Latent Diffusion Models}, author={Liu, Haohe and Chen, Zehua and Yuan, Yi and Mei, Xinhao and Liu, Xubo and Mandic, Danilo and Wang, Wenwu and Plumbley, Mark D}, journal={arXiv preprint arXiv:2301.12503}, year={2023} } ```
4,636
[ [ -0.04302978515625, -0.075927734375, 0.04241943359375, 0.009490966796875, -0.00080108642578125, 0.0018482208251953125, -0.0185699462890625, -0.019927978515625, 0.00972747802734375, 0.0361328125, -0.06585693359375, -0.06488037109375, -0.037841796875, -0.003971...
timm/vgg13.tv_in1k
2023-04-25T20:08:33.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1409.1556", "license:bsd-3-clause", "region:us" ]
image-classification
timm
null
null
timm/vgg13.tv_in1k
0
658
timm
2023-04-25T20:06:41
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg13.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 133.0 - GMACs: 11.3 - Activations (M): 12.3 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg13.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
3,634
[ [ -0.035797119140625, -0.035797119140625, 0.0014133453369140625, 0.0023784637451171875, -0.030548095703125, -0.022247314453125, -0.02105712890625, -0.0286865234375, 0.01157379150390625, 0.0307464599609375, -0.029937744140625, -0.05987548828125, -0.0572509765625, ...
syzymon/long_llama_3b_instruct
2023-08-05T00:03:02.000Z
[ "transformers", "pytorch", "longllama", "text-generation", "code", "text-generation-inference", "custom_code", "dataset:Open-Orca/OpenOrca", "dataset:zetavg/ShareGPT-Processed", "dataset:bigcode/starcoderdata", "dataset:togethercomputer/RedPajama-Data-1T", "dataset:tiiuae/falcon-refinedweb", ...
text-generation
syzymon
null
null
syzymon/long_llama_3b_instruct
22
658
transformers
2023-08-04T17:35:59
--- datasets: - Open-Orca/OpenOrca - zetavg/ShareGPT-Processed - bigcode/starcoderdata - togethercomputer/RedPajama-Data-1T - tiiuae/falcon-refinedweb metrics: - code_eval - accuracy pipeline_tag: text-generation tags: - code - text-generation-inference model-index: - name: long_llama_3b_instruct results: - task: name: Code Generation type: code-generation dataset: name: "HumanEval" type: openai_humaneval metrics: - name: pass@1 type: pass@1 value: 0.12 verified: false --- # LongLLaMA: Focused Transformer Training for Context Scaling <div align="center"> <a href="https://colab.research.google.com/github/CStanKonrad/long_llama/blob/main/long_llama_instruct_colab.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg"></a> </div> <div align="center"> [TLDR](#TLDR) | [Overview](#Overview) | [Usage](#Usage) | [LongLLaMA performance](#LongLLaMA-performance) | [Authors](#Authors) | [Citation](#Citation) | [License](#License) | [Acknowledgments](#Acknowledgments) </div> ## TLDR This repo contains [LongLLaMA-Instruct-3Bv1.1](https://huggingface.co/syzymon/long_llama_3b_instruct), which is for **research purposes only**. LongLLaMA is built upon the foundation of [OpenLLaMA](https://github.com/openlm-research/open_llama) and fine-tuned using the [Focused Transformer (FoT)](https://arxiv.org/abs/2307.03170) method. We release a smaller 3B base variant (not instruction tuned) of the LongLLaMA model under a permissive license (Apache 2.0) and inference code supporting longer contexts on [Hugging Face](https://huggingface.co/syzymon/long_llama_3b). Our model weights can serve as a drop-in replacement for LLaMA in existing implementations (for short contexts of up to 2048 tokens). Additionally, we provide evaluation results and comparisons against the original OpenLLaMA models. Stay tuned for further updates. ## Overview ### Base models [Focused Transformer: Contrastive Training for Context Scaling](https://arxiv.org/abs/2307.03170) (FoT) presents a simple method for endowing language models with the ability to handle contexts possibly consisting of millions of tokens while training on significantly shorter inputs. FoT permits a subset of attention layers to access a memory cache of (key, value) pairs to extend the context length. The distinctive aspect of FoT is its training procedure, drawing from contrastive learning. Specifically, we deliberately expose the memory attention layers to both relevant and irrelevant keys (like negative samples from unrelated documents). This strategy incentivizes the model to differentiate keys connected with semantically diverse values, thereby enhancing their structure. This, in turn, makes it possible to extrapolate the effective context length much beyond what is seen in training. **LongLLaMA** is an [OpenLLaMA](https://github.com/openlm-research/open_llama) model finetuned with the FoT method, with three layers used for context extension. **Crucially, LongLLaMA is able to extrapolate much beyond the context length seen in training: $8k$. E.g., in the passkey retrieval task, it can handle inputs of length $256k$**.
<div align="center"> | | [LongLLaMA-3B](https://huggingface.co/syzymon/long_llama_3b_instruct) | [LongLLaMA-3Bv1.1](https://huggingface.co/syzymon/long_llama_3b_v1_1) | LongLLaMA-7B<br />*(coming soon)*| LongLLaMA-13B<br />*(coming soon)*| |----------------|----------|----------|-----------|-----------| | Source model | [OpenLLaMA-3B](https://huggingface.co/openlm-research/open_llama_3b_easylm) | [OpenLLaMA-3Bv2](https://huggingface.co/openlm-research/open_llama_3b_v2_easylm) | - | - | | Source model tokens | 1T | 1T | - | - | | Fine-tuning tokens | 10B | 5B | - | - | | Memory layers | 6, 12, 18 | 6, 12, 18 | - | - | </div> ### Instruction/Chat tuning In the [fine_tuning](fine_tuning) subfolder, we provide the code that was used to create [LongLLaMA-Instruct-3Bv1.1](https://huggingface.co/syzymon/long_llama_3b_instruct), an instruction-tuned version of [LongLLaMA-3Bv1.1](https://huggingface.co/syzymon/long_llama_3b_v1_1). We used the [OpenOrca](https://huggingface.co/datasets/Open-Orca/OpenOrca) (instructions) and [zetavg/ShareGPT-Processed](https://huggingface.co/datasets/zetavg/ShareGPT-Processed) (chat) datasets for tuning. ## Usage See also: * [Colab with LongLLaMA-Instruct-3Bv1.1](https://colab.research.google.com/github/CStanKonrad/long_llama/blob/main/long_llama_instruct_colab.ipynb). * [Colab with an example usage of base LongLLaMA](https://colab.research.google.com/github/CStanKonrad/long_llama/blob/main/long_llama_colab.ipynb). ### Requirements ``` pip install --upgrade pip pip install transformers==4.30 sentencepiece accelerate ``` ### Loading model ```python import torch from transformers import LlamaTokenizer, AutoModelForCausalLM tokenizer = LlamaTokenizer.from_pretrained("syzymon/long_llama_3b_instruct") model = AutoModelForCausalLM.from_pretrained("syzymon/long_llama_3b_instruct", torch_dtype=torch.float32, trust_remote_code=True) ``` ### Input handling and generation LongLLaMA uses the Hugging Face interface; long inputs given to the model are split into context windows and loaded into the memory cache. ```python prompt = "My name is Julien and I like to" input_ids = tokenizer(prompt, return_tensors="pt").input_ids outputs = model(input_ids=input_ids) ``` During the model call, one can provide the parameter `last_context_length` (default $1024$), which specifies the number of tokens left in the last context window. Tuning this parameter can improve generation, as the first layers do not have access to memory. See details in [How LongLLaMA handles long inputs](#How-LongLLaMA-handles-long-inputs). ```python generation_output = model.generate( input_ids=input_ids, max_new_tokens=256, num_beams=1, last_context_length=1792, do_sample=True, temperature=1.0, ) print(tokenizer.decode(generation_output[0])) ``` ### Additional configuration LongLLaMA has several other parameters: * `mem_layers` specifies layers endowed with memory (should be either an empty list or a list of all memory layers specified in the description of the checkpoint). * `mem_dtype` allows changing the type of the memory cache. * `mem_attention_grouping` can trade off speed for reduced memory usage. When equal to `(4, 2048)`, the memory layers will process at most $4*2048$ queries at once ($4$ heads and $2048$ queries for each head).
```python import torch from transformers import LlamaTokenizer, AutoModelForCausalLM tokenizer = LlamaTokenizer.from_pretrained("syzymon/long_llama_3b_instruct") model = AutoModelForCausalLM.from_pretrained( "syzymon/long_llama_3b_instruct", torch_dtype=torch.float32, mem_layers=[], mem_dtype='bfloat16', trust_remote_code=True, mem_attention_grouping=(4, 2048), ) ``` ### Drop-in use with LLaMA code LongLLaMA checkpoints can also be used as a drop-in replacement for LLaMA checkpoints in the [Hugging Face implementation of LLaMA](https://huggingface.co/docs/transformers/main/model_doc/llama), but in this case, they will be limited to the original context length of $2048$. ```python from transformers import LlamaTokenizer, LlamaForCausalLM import torch tokenizer = LlamaTokenizer.from_pretrained("syzymon/long_llama_3b_instruct") model = LlamaForCausalLM.from_pretrained("syzymon/long_llama_3b_instruct", torch_dtype=torch.float32) ``` ### How LongLLaMA handles long inputs Inputs over $2048$ tokens are automatically split into windows $w_1, \ldots, w_m$. The first $m-2$ windows contain $2048$ tokens each, $w_{m-1}$ has no more than $2048$ tokens, and $w_m$ contains the number of tokens specified by `last_context_length`. The model processes the windows one by one, extending the memory cache after each of them. If `use_cache` is `True`, the last window will not be loaded to the memory cache but to the local (generation) cache. The memory cache stores $(key, value)$ pairs for each head of the specified memory layers `mem_layers`. In addition to this, it stores attention masks. If `use_cache=True` (which is the case in generation), LongLLaMA will use two caches: the memory cache for the specified layers and the local (generation) cache for all layers. When the local cache exceeds $2048$ elements, its content is moved to the memory cache for the memory layers. For simplicity, context extension is realized with a memory cache and full attention in this repo. Replacing this simple mechanism with a KNN search over an external database is possible with systems like [Faiss](https://github.com/facebookresearch/faiss). This would potentially enable further context-length scaling. We leave this as future work. ## LongLLaMA performance We present some illustrative examples of LongLLaMA results. Refer to our paper [Focused Transformer: Contrastive Training for Context Scaling](https://arxiv.org/abs/2307.03170) for more details. We manage to achieve good performance on the passkey retrieval task from [Landmark Attention: Random-Access Infinite Context Length for Transformers](https://arxiv.org/abs/2305.16300). The code for generating the prompt and running the model is located in `examples/passkey.py`. <p align="center" width="100%"> <img src="assets/plot_passkey.png" alt="LongLLaMA" style="width: 70%; min-width: 300px; display: block; margin: auto;"> </p> Our LongLLaMA 3B model also shows improvements when using long context on two downstream tasks, TREC question classification and WebQS question answering. <div align="center"> | Context/Dataset | TREC | WebQS | | --- | --- | --- | | $2K$ | 67.0 | 21.2 | | $4K$ | 71.6 | 21.4 | | $6K$ | 72.9 | 22.2 | | $8K$ | **73.3** | **22.4** | </div> LongLLaMA retains performance on tasks that do not require long context. We provide a comparison with OpenLLaMA on [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) in the zero-shot setting.
<div align="center"> | Task/Metric | OpenLLaMA-3B | LongLLaMA-3B | |----------------|----------|-----------| | anli_r1/acc | 0.33 | 0.32 | | anli_r2/acc | 0.32 | 0.33 | | anli_r3/acc | 0.35 | 0.35 | | arc_challenge/acc | 0.34 | 0.34 | | arc_challenge/acc_norm | 0.37 | 0.37 | | arc_easy/acc | 0.69 | 0.68 | | arc_easy/acc_norm | 0.65 | 0.63 | | boolq/acc | 0.68 | 0.68 | | hellaswag/acc | 0.49 | 0.48 | | hellaswag/acc_norm | 0.67 | 0.65 | | openbookqa/acc | 0.27 | 0.28 | | openbookqa/acc_norm | 0.40 | 0.38 | | piqa/acc | 0.75 | 0.73 | | piqa/acc_norm | 0.76 | 0.75 | | record/em | 0.88 | 0.87 | | record/f1 | 0.89 | 0.87 | | rte/acc | 0.58 | 0.60 | | truthfulqa_mc/mc1 | 0.22 | 0.24 | | truthfulqa_mc/mc2 | 0.35 | 0.38 | | wic/acc | 0.48 | 0.50 | | winogrande/acc | 0.62 | 0.60 | | Avg score | 0.53 | 0.53 | </div> Starting with the v1.1 models, we have decided to use the [EleutherAI](https://github.com/EleutherAI) implementation of [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) with a slight modification that adds a `<bos>` token at the beginning of the input sequence. The results are provided in the table below. <div align="center"> | description | LongLLaMA-3B | OpenLLaMA-3Bv2 | LongLLaMA-3Bv1.1 | LongLLaMA-Instruct-3Bv1.1 | |:---|:---|:---|:---|:---| | anli_r1/acc | 0.32 | 0.33 | 0.31 | 0.33 | | anli_r2/acc | 0.33 | 0.35 | 0.33 | 0.35 | | anli_r3/acc | 0.35 | 0.38 | 0.35 | 0.38 | | arc_challenge/acc | 0.34 | 0.33 | 0.32 | 0.36 | | arc_challenge/acc_norm | 0.37 | 0.36 | 0.36 | 0.37 | | arc_easy/acc | 0.67 | 0.68 | 0.68 | 0.7 | | arc_easy/acc_norm | 0.63 | 0.63 | 0.63 | 0.63 | | boolq/acc | 0.68 | 0.67 | 0.66 | 0.77 | | hellaswag/acc | 0.48 | 0.53 | 0.52 | 0.52 | | hellaswag/acc_norm | 0.65 | 0.7 | 0.69 | 0.68 | | openbookqa/acc | 0.28 | 0.28 | 0.28 | 0.28 | | openbookqa/acc_norm | 0.38 | 0.39 | 0.37 | 0.41 | | piqa/acc | 0.73 | 0.77 | 0.77 | 0.78 | | piqa/acc_norm | 0.75 | 0.78 | 0.77 | 0.77 | | record/em | 0.87 | 0.87 | 0.86 | 0.85 | | record/f1 | 0.88 | 0.88 | 0.87 | 0.86 | | rte/acc | 0.6 | 0.53 | 0.62 | 0.7 | | truthfulqa_mc/mc1 | 0.24 | 0.22 | 0.21 | 0.25 | | truthfulqa_mc/mc2 | 0.38 | 0.35 | 0.35 | 0.4 | | wic/acc | 0.5 | 0.5 | 0.5 | 0.54 | | winogrande/acc | 0.6 | 0.66 | 0.63 | 0.65 | | Avg score | 0.53 | 0.53 | 0.53 | 0.55 | </div> We also provide the results on HumanEval.
We cut the generated text after either * `"\ndef "` * `"\nclass "` * `"\nif __name__"` <div align="center"> | | OpenLLaMA-3Bv2 | LongLLaMA-3Bv1.1 | LongLLaMA-Instruct-3Bv1.1 | | - | - | - | - | | pass@1 | 0.09 | 0.12 | 0.12 | </div> ## Authors - [Szymon Tworkowski](https://scholar.google.com/citations?user=1V8AeXYAAAAJ&hl=en) - [Konrad Staniszewski](https://scholar.google.com/citations?user=CM6PCBYAAAAJ) - [Mikołaj Pacek](https://scholar.google.com/citations?user=eh6iEbQAAAAJ&hl=en&oi=ao) - [Henryk Michalewski](https://scholar.google.com/citations?user=YdHW1ycAAAAJ&hl=en) - [Yuhuai Wu](https://scholar.google.com/citations?user=bOQGfFIAAAAJ&hl=en) - [Piotr Miłoś](https://scholar.google.pl/citations?user=Se68XecAAAAJ&hl=pl&oi=ao) ## Citation To cite this work, please use ```bibtex @misc{tworkowski2023focused, title={Focused Transformer: Contrastive Training for Context Scaling}, author={Szymon Tworkowski and Konrad Staniszewski and Mikołaj Pacek and Yuhuai Wu and Henryk Michalewski and Piotr Miłoś}, year={2023}, eprint={2307.03170}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## License The code and base model checkpoints are licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0). The instruction/chat-tuned models are for research purposes only. Some of the examples use external code (see headers of files for copyright notices and licenses). ## Acknowledgments We gratefully acknowledge the TPU Research Cloud program, which was instrumental to our research by providing significant computational resources. We are also grateful to Xinyang Geng and Hao Liu for releasing [OpenLLaMA](https://github.com/openlm-research/open_llama) checkpoints and the [EasyLM](https://github.com/young-geng/EasyLM) library.
21,825
[ [ -0.035186767578125, -0.061309814453125, 0.033355712890625, 0.041168212890625, -0.0229339599609375, -0.0195770263671875, -0.0364990234375, -0.053924560546875, 0.0189208984375, 0.02618408203125, -0.047119140625, -0.0423583984375, -0.03558349609375, -0.00357627...
PlanTL-GOB-ES/roberta-large-bne-capitel-ner
2022-11-30T09:00:05.000Z
[ "transformers", "pytorch", "roberta", "token-classification", "national library of spain", "spanish", "bne", "capitel", "ner", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us...
token-classification
PlanTL-GOB-ES
null
null
PlanTL-GOB-ES/roberta-large-bne-capitel-ner
0
657
transformers
2022-03-02T23:29:04
--- language: - es license: apache-2.0 tags: - "national library of spain" - "spanish" - "bne" - "capitel" - "ner" datasets: - "bne" - "capitel" metrics: - "f1" inference: parameters: aggregation_strategy: "first" model-index: - name: roberta-large-bne-capitel-ner results: - task: type: token-classification dataset: type: ner name: CAPITEL-NERC metrics: - name: F1 type: f1 value: 0.9051 widget: - "Me llamo Francisco Javier y vivo en Madrid." - "Mi hermano Ramón y su mejor amigo Luis trabajan en el BSC." --- # Spanish RoBERTa-large trained on BNE, finetuned on the CAPITEL Named Entity Recognition (NER) dataset ## Table of contents <details> <summary>Click to expand</summary> - [Model description](#model-description) - [Intended uses and limitations](#intended-use) - [How to use](#how-to-use) - [Limitations and bias](#limitations-and-bias) - [Training](#training) - [Training data](#training-data) - [Training procedure](#training-procedure) - [Evaluation](#evaluation) - [Variable and metrics](#variable-and-metrics) - [Evaluation results](#evaluation-results) - [Additional information](#additional-information) - [Author](#author) - [Contact information](#contact-information) - [Copyright](#copyright) - [Licensing information](#licensing-information) - [Funding](#funding) - [Citing information](#citing-information) - [Disclaimer](#disclaimer) </details> ## Model description The **roberta-large-bne-capitel-ner** is a Named Entity Recognition (NER) model for the Spanish language fine-tuned from the [roberta-large-bne](https://huggingface.co/PlanTL-GOB-ES/roberta-large-bne) model, a [RoBERTa](https://arxiv.org/abs/1907.11692) large model pre-trained using the largest Spanish corpus known to date, with a total of 570GB of clean and deduplicated text, processed for this work, compiled from the web crawlings performed by the [National Library of Spain (Biblioteca Nacional de España)](http://www.bne.es/en/Inicio/index.html) from 2009 to 2019. ## Intended uses and limitations The **roberta-large-bne-capitel-ner** model can be used to recognize Named Entities (NE). The model is limited by its training dataset and may not generalize well for all use cases. ## How to use ```python from transformers import pipeline from pprint import pprint nlp = pipeline("ner", model="PlanTL-GOB-ES/roberta-large-bne-capitel-ner") example = "Me llamo Francisco Javier y vivo en Madrid." ner_results = nlp(example) pprint(ner_results) ``` ## Limitations and bias At the time of submission, no measures have been taken to estimate the bias embedded in the model. However, we are well aware that our models may be biased since the corpora have been collected using crawling techniques on multiple web sources. We intend to conduct research in these areas in the future, and if completed, this model card will be updated. ## Training The dataset used is the one from the [CAPITEL competition at IberLEF 2020](https://sites.google.com/view/capitel2020) (sub-task 1). ### Training procedure The model was trained with a batch size of 32 and a learning rate of 3e-5 for 5 epochs. We then selected the best checkpoint using the downstream task metric on the corresponding development set and evaluated it on the test set. ## Evaluation ### Variable and metrics This model was finetuned maximizing the F1 score.
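As an illustration of the metric, entity-level F1 can be computed with the `seqeval` package; a minimal sketch with toy tag sequences (not CAPITEL data):

```python
from seqeval.metrics import classification_report, f1_score

# toy BIO-tagged sequences, for illustration only
y_true = [["B-PER", "I-PER", "O", "B-LOC", "O"]]
y_pred = [["B-PER", "I-PER", "O", "O", "O"]]

print(f1_score(y_true, y_pred))  # strict entity-level F1
print(classification_report(y_true, y_pred))
```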
## Evaluation results We evaluated the **roberta-large-bne-capitel-ner** on the CAPITEL-NERC test set against standard multilingual and monolingual baselines: | Model | CAPITEL-NERC (F1) | | ------------|:----| | roberta-large-bne-capitel-ner | **90.51** | | roberta-base-bne-capitel-ner | 89.60| | BETO | 87.72 | | mBERT | 88.10 | | BERTIN | 88.56 | | ELECTRA | 80.35 | For more details, check the fine-tuning and evaluation scripts in the official [GitHub repository](https://github.com/PlanTL-GOB-ES/lm-spanish). ## Additional information ### Author Text Mining Unit (TeMU) at the Barcelona Supercomputing Center (bsc-temu@bsc.es) ### Contact information For further information, send an email to <plantl-gob-es@bsc.es> ### Copyright Copyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022) ### Licensing information [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) ### Funding This work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL. ## Citing information If you use this model, please cite our [paper](http://journal.sepln.org/sepln/ojs/ojs/index.php/pln/article/view/6405): ``` @article{, abstract = {We want to thank the National Library of Spain for such a large effort on the data gathering and the Future of Computing Center, a Barcelona Supercomputing Center and IBM initiative (2020). This work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL.}, author = {Asier Gutiérrez Fandiño and Jordi Armengol Estapé and Marc Pàmies and Joan Llop Palao and Joaquin Silveira Ocampo and Casimiro Pio Carrino and Carme Armentano Oller and Carlos Rodriguez Penagos and Aitor Gonzalez Agirre and Marta Villegas}, doi = {10.26342/2022-68-3}, issn = {1135-5948}, journal = {Procesamiento del Lenguaje Natural}, keywords = {Artificial intelligence,Benchmarking,Data processing.,MarIA,Natural language processing,Spanish language modelling,Spanish language resources,Tractament del llenguatge natural (Informàtica),Àrees temàtiques de la UPC::Informàtica::Intel·ligència artificial::Llenguatge natural}, publisher = {Sociedad Española para el Procesamiento del Lenguaje Natural}, title = {MarIA: Spanish Language Models}, volume = {68}, url = {https://upcommons.upc.edu/handle/2117/367156#.YyMTB4X9A-0.mendeley}, year = {2022}, } ``` ### Disclaimer The models published in this repository are intended for a generalist purpose and are available to third parties. These models may have bias and/or any other undesirable distortions. When third parties, deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence. In no event shall the owner of the models (SEDIA – State Secretariat for digitalization and artificial intelligence) nor the creator (BSC – Barcelona Supercomputing Center) be liable for any results arising from the use made by third parties of these models. Los modelos publicados en este repositorio tienen una finalidad generalista y están a disposición de terceros. Estos modelos pueden tener sesgos y/u otro tipo de distorsiones indeseables. 
Cuando terceros desplieguen o proporcionen sistemas y/o servicios a otras partes usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) o se conviertan en usuarios de los modelos, deben tener en cuenta que es su responsabilidad mitigar los riesgos derivados de su uso y, en todo caso, cumplir con la normativa aplicable, incluyendo la normativa en materia de uso de inteligencia artificial. En ningún caso el propietario de los modelos (SEDIA – Secretaría de Estado de Digitalización e Inteligencia Artificial) ni el creador (BSC – Barcelona Supercomputing Center) serán responsables de los resultados derivados del uso que hagan terceros de estos modelos.
7,765
[ [ -0.040008544921875, -0.048309326171875, 0.024658203125, 0.0257720947265625, -0.00684356689453125, -0.0081329345703125, -0.034515380859375, -0.05535888671875, 0.0163726806640625, 0.035125732421875, -0.0440673828125, -0.0498046875, -0.044830322265625, 0.029693...
edbeeching/decision-transformer-gym-hopper-medium
2022-06-29T19:15:16.000Z
[ "transformers", "pytorch", "decision_transformer", "feature-extraction", "deep-reinforcement-learning", "reinforcement-learning", "decision-transformer", "gym-continous-control", "arxiv:2106.01345", "endpoints_compatible", "has_space", "region:us" ]
reinforcement-learning
edbeeching
null
null
edbeeching/decision-transformer-gym-hopper-medium
2
657
transformers
2022-03-16T08:20:31
---
tags:
- deep-reinforcement-learning
- reinforcement-learning
- decision-transformer
- gym-continous-control
pipeline_tag: reinforcement-learning
---

# Decision Transformer model trained on medium trajectories sampled from the Gym Hopper environment

This is a [Decision Transformer](https://arxiv.org/abs/2106.01345) model trained on medium trajectories sampled from the Gym Hopper environment.

The following normalization coefficients are required to use this model:

mean = [ 1.311279, -0.08469521, -0.5382719, -0.07201576, 0.04932366, 2.1066856, -0.15017354, 0.00878345, -0.2848186, -0.18540096, -0.28461286]
std = [0.17790751, 0.05444621, 0.21297139, 0.14530419, 0.6124444, 0.85174465, 1.4515252, 0.6751696, 1.536239, 1.6160746, 5.6072536 ]

See our [Blog Post](https://colab.research.google.com/drive/1K3UuajwoPY1MzRKNkONNRS3gS5DxZ-qF?usp=sharing), [Colab notebook](https://colab.research.google.com/drive/1K3UuajwoPY1MzRKNkONNRS3gS5DxZ-qF?usp=sharing) or [Example Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/decision_transformer) for usage.
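The card lists the coefficients but not how to apply them. A minimal sketch, assuming raw Gym Hopper observations arrive as NumPy arrays, is to standardize each state dimension before it enters the model:

```python
import numpy as np

# Normalization coefficients from the model card above
state_mean = np.array([1.311279, -0.08469521, -0.5382719, -0.07201576,
                       0.04932366, 2.1066856, -0.15017354, 0.00878345,
                       -0.2848186, -0.18540096, -0.28461286])
state_std = np.array([0.17790751, 0.05444621, 0.21297139, 0.14530419,
                      0.6124444, 0.85174465, 1.4515252, 0.6751696,
                      1.536239, 1.6160746, 5.6072536])

def normalize_state(raw_state: np.ndarray) -> np.ndarray:
    """Standardize a raw Hopper observation before feeding it to the model."""
    return (raw_state - state_mean) / state_std
```

The environment interaction loop itself is not shown here; see the linked example script for the full evaluation procedure.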
1,118
[ [ -0.0343017578125, -0.041290283203125, 0.0206756591796875, 0.011444091796875, -0.01291656494140625, -0.015716552734375, -0.002613067626953125, 0.0011472702026367188, 0.01727294921875, 0.0202178955078125, -0.061065673828125, -0.03472900390625, -0.0462646484375, ...
google/switch-base-64
2023-01-24T17:19:59.000Z
[ "transformers", "pytorch", "switch_transformers", "text2text-generation", "en", "dataset:c4", "arxiv:2101.03961", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text2text-generation
google
null
null
google/switch-base-64
2
657
transformers
2022-11-04T07:59:09
---
language:
- en
tags:
- text2text-generation
widget:
- text: "The <extra_id_0> walks in <extra_id_1> park"
  example_title: "Masked Language Modeling"
datasets:
- c4
license: apache-2.0
---

# Model Card for Switch Transformers Base - 64 experts

![model image](https://s3.amazonaws.com/moonup/production/uploads/1666966931908-62441d1d9fdefb55a0b7d12c.png)

# Table of Contents

0. [TL;DR](#TL;DR)
1. [Model Details](#model-details)
2. [Usage](#usage)
3. [Uses](#uses)
4. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
5. [Training Details](#training-details)
6. [Evaluation](#evaluation)
7. [Environmental Impact](#environmental-impact)
8. [Citation](#citation)
9. [Model Card Authors](#model-card-authors)

# TL;DR

Switch Transformers is a Mixture of Experts (MoE) model trained on the Masked Language Modeling (MLM) task. The model architecture is similar to the classic T5, but with the feed-forward layers replaced by sparse MLP layers containing "expert" MLPs. According to the [original paper](https://arxiv.org/pdf/2101.03961.pdf), the model enables faster training (scaling properties) while being better than T5 on fine-tuned tasks.
As mentioned in the first few lines of the abstract:

> we advance the current scale of language models by pre-training up to trillion parameter models on the “Colossal Clean Crawled Corpus”, and achieve a 4x speedup over the T5-XXL model.

**Disclaimer**: Content from **this** model card has been written by the Hugging Face team, and parts of it were copy-pasted from the [original paper](https://arxiv.org/pdf/2101.03961.pdf).

# Model Details

## Model Description

- **Model type:** Language model
- **Language(s) (NLP):** English
- **License:** Apache 2.0
- **Related Models:** [All Switch Transformers Checkpoints](https://huggingface.co/models?search=switch)
- **Original Checkpoints:** [All Original Switch Transformers Checkpoints](https://github.com/google-research/t5x/blob/main/docs/models.md#mixture-of-experts-moe-checkpoints)
- **Resources for more information:**
  - [Research paper](https://arxiv.org/pdf/2101.03961.pdf)
  - [GitHub Repo](https://github.com/google-research/t5x)
  - [Hugging Face Switch Transformers Docs (Similar to T5)](https://huggingface.co/docs/transformers/model_doc/switch_transformers)

# Usage

Note that these checkpoints have been trained on the Masked Language Modeling (MLM) task. Therefore the checkpoints are not "ready-to-use" for downstream tasks. You may want to check `FLAN-T5` for running fine-tuned weights, or fine-tune your own MoE following [this notebook](https://colab.research.google.com/drive/1aGGVHZmtKmcNBbAwa9hbu58DDpIuB5O4?usp=sharing).

Find below some example scripts on how to use the model in `transformers`:

## Using the Pytorch model

### Running the model on a CPU

<details>
<summary> Click to expand </summary>

```python
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/switch-base-64")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-64")

input_text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0]))
>>> <pad> <extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> salt<extra_id_4>.</s>
```

</details>

### Running the model on a GPU

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/switch-base-64")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-64", device_map="auto")

input_text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)

outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0]))
>>> <pad> <extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> salt<extra_id_4>.</s>
```

</details>

### Running the model on a GPU using different precisions

#### FP16

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
import torch
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/switch-base-64")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-64", device_map="auto", torch_dtype=torch.float16)

input_text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)

outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0]))
>>> <pad> <extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> salt<extra_id_4>.</s>
```

</details>

#### INT8

<details>
<summary> Click to expand </summary>

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/switch-base-64")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-64", device_map="auto", load_in_8bit=True)

input_text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)

outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0]))
>>> <pad> <extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> salt<extra_id_4>.</s>
```

</details>

# Uses

## Direct Use and Downstream Use

See the [research paper](https://arxiv.org/pdf/2101.03961.pdf) for further details.

## Out-of-Scope Use

More information needed.

# Bias, Risks, and Limitations

More information needed.

## Ethical considerations and risks

More information needed.

## Known Limitations

More information needed.

## Sensitive Use:

More information needed.

# Training Details

## Training Data

The model was trained on a Masked Language Modeling task, on the Colossal Clean Crawled Corpus (C4) dataset, following the same procedure as `T5`.

## Training Procedure

According to the model card from the [original paper](https://arxiv.org/pdf/2101.03961.pdf), the model has been trained on TPU v3 or TPU v4 pods, using the [`t5x`](https://github.com/google-research/t5x) codebase together with [`jax`](https://github.com/google/jax).

# Evaluation

## Testing Data, Factors & Metrics

The authors evaluated the model on various tasks and compared the results against T5.
See the table below for some quantitative evaluation: ![image.png](https://s3.amazonaws.com/moonup/production/uploads/1666967660372-62441d1d9fdefb55a0b7d12c.png) For full details, please check the [research paper](https://arxiv.org/pdf/2101.03961.pdf). ## Results For full results for Switch Transformers, see the [research paper](https://arxiv.org/pdf/2101.03961.pdf), Table 5. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - TPU v3 or TPU v4 | Number of chips ≥ 4. - **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @misc{https://doi.org/10.48550/arxiv.2101.03961, doi = {10.48550/ARXIV.2101.03961}, url = {https://arxiv.org/abs/2101.03961}, author = {Fedus, William and Zoph, Barret and Shazeer, Noam}, keywords = {Machine Learning (cs.LG), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
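As a companion to the TL;DR above, here is a toy sketch of the top-1 ("switch") routing that replaces the dense feed-forward layer. It is a simplified illustration, not the `transformers` implementation: the class name and dimensions are invented, and the paper's auxiliary load-balancing loss is omitted.

```python
import torch
import torch.nn as nn

class ToySwitchLayer(nn.Module):
    """Illustrative top-1 MoE layer: each token is routed to exactly one expert."""
    def __init__(self, d_model: int = 64, d_ff: int = 128, num_experts: int = 4):
        super().__init__()
        self.router = nn.Linear(d_model, num_experts)  # produces routing logits
        self.experts = nn.ModuleList(
            nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))
            for _ in range(num_experts)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (num_tokens, d_model)
        probs = self.router(x).softmax(dim=-1)   # routing probabilities
        gate, expert_idx = probs.max(dim=-1)     # top-1 expert per token
        out = torch.zeros_like(x)
        for i, expert in enumerate(self.experts):
            mask = expert_idx == i
            if mask.any():                       # only run experts that received tokens
                out[mask] = gate[mask, None] * expert(x[mask])
        return out

tokens = torch.randn(10, 64)
print(ToySwitchLayer()(tokens).shape)  # torch.Size([10, 64])
```

Because each token activates exactly one expert, the parameter count grows with the number of experts while the per-token compute stays roughly constant.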
8,211
[ [ -0.03546142578125, -0.031341552734375, 0.014862060546875, 0.01483154296875, -0.00698089599609375, 0.003726959228515625, -0.01123046875, -0.03076171875, -0.0030460357666015625, 0.0279693603515625, -0.0438232421875, -0.0233001708984375, -0.058258056640625, 0.0...
timm/inception_resnet_v2.tf_ens_adv_in1k
2023-05-10T01:08:43.000Z
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1602.07261", "arxiv:1804.00097", "license:apache-2.0", "region:us" ]
image-classification
timm
null
null
timm/inception_resnet_v2.tf_ens_adv_in1k
0
657
timm
2023-04-25T21:33:03
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for inception_resnet_v2.tf_ens_adv_in1k

An Inception-ResNet-v2 image classification model. Adversarially (ensemble) trained on ImageNet-1k by paper authors. Ported from Tensorflow by Ross Wightman.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 55.8
  - GMACs: 13.2
  - Activations (M): 25.1
  - Image size: 299 x 299
- **Papers:**
  - Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning: https://arxiv.org/abs/1602.07261
  - Adversarial Attacks and Defences Competition: https://arxiv.org/abs/1804.00097
- **Original:** https://github.com/tensorflow/models
- **Dataset:** ImageNet-1k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('inception_resnet_v2.tf_ens_adv_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'inception_resnet_v2.tf_ens_adv_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 147, 147])
    #  torch.Size([1, 192, 71, 71])
    #  torch.Size([1, 320, 35, 35])
    #  torch.Size([1, 1088, 17, 17])
    #  torch.Size([1, 1536, 8, 8])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'inception_resnet_v2.tf_ens_adv_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 1536, 8, 8) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).
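Sibling checkpoints can also be listed programmatically with `timm.list_models`, which is handy when comparing this variant against other Inception-ResNet-v2 weights:

```python
import timm

# List pretrained Inception-ResNet-v2 variants bundled with timm
print(timm.list_models('inception_resnet_v2*', pretrained=True))
```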
## Citation ```bibtex @article{Szegedy2016Inceptionv4IA, title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alexander A. Alemi}, journal={ArXiv}, year={2016}, volume={abs/1602.07261} } ``` ```bibtex @article{Kurakin2018AdversarialAA, title={Adversarial Attacks and Defences Competition}, author={Alexey Kurakin and Ian J. Goodfellow and Samy Bengio and Yinpeng Dong and Fangzhou Liao and Ming Liang and Tianyu Pang and Jun Zhu and Xiaolin Hu and Cihang Xie and Jianyu Wang and Zhishuai Zhang and Zhou Ren and Alan Loddon Yuille and Sangxia Huang and Yao Zhao and Yuzhe Zhao and Zhonglin Han and Junjiajia Long and Yerkebulan Berdibekov and Takuya Akiba and Seiya Tokui and Motoki Abe}, journal={ArXiv}, year={2018}, volume={abs/1804.00097} } ```
4,440
[ [ -0.03973388671875, -0.041595458984375, 0.0013484954833984375, -0.004669189453125, -0.017852783203125, -0.0182647705078125, -0.01348114013671875, -0.03277587890625, 0.01340484619140625, 0.0252685546875, -0.034515380859375, -0.046966552734375, -0.051361083984375, ...
TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF
2023-09-27T12:47:09.000Z
[ "transformers", "llama", "facebook", "meta", "pytorch", "llama-2", "text-generation", "en", "dataset:garage-bAInd/Open-Platypus", "arxiv:2307.09288", "license:llama2", "has_space", "text-generation-inference", "region:us" ]
text-generation
TheBloke
null
null
TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF
40
656
transformers
2023-09-02T09:17:16
---
language:
- en
license: llama2
library_name: transformers
tags:
- facebook
- meta
- pytorch
- llama
- llama-2
datasets:
- garage-bAInd/Open-Platypus
model_name: Speechless Llama2 Hermes Orca-Platypus WizardLM 13B
base_model: uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b
inference: false
model_creator: Jiangwen Su
model_type: llama
pipeline_tag: text-generation
prompt_template: 'Below is an instruction that describes a task. Write a response that appropriately completes the request.

  ### Instruction:

  {prompt}

  ### Response:

  '
quantized_by: TheBloke
---

<!-- header start -->
<!-- 200823 -->
<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
    <div style="display: flex; flex-direction: column; align-items: flex-start;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p>
    </div>
    <div style="display: flex; flex-direction: column; align-items: flex-end;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
    </div>
</div>
<div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div>
<hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
<!-- header end -->

# Speechless Llama2 Hermes Orca-Platypus WizardLM 13B - GGUF
- Model creator: [Jiangwen Su](https://huggingface.co/uukuguy)
- Original model: [Speechless Llama2 Hermes Orca-Platypus WizardLM 13B](https://huggingface.co/uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b)

<!-- description start -->
## Description

This repo contains GGUF format model files for [Jiangwen Su's Speechless Llama2 Hermes Orca-Platypus WizardLM 13B](https://huggingface.co/uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b).

<!-- description end -->

<!-- README_GGUF.md-about-gguf start -->
### About GGUF

GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.

GGUF offers numerous advantages over GGML, such as better tokenisation and support for special tokens. It also supports metadata, and is designed to be extensible.

Here is an incomplete list of clients and libraries that are known to support GGUF:

* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.
* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
* [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and an OpenAI-compatible API server.
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and an OpenAI-compatible API server.
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.

<!-- README_GGUF.md-about-gguf end -->
<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF)
* [Jiangwen Su's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: Alpaca

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{prompt}

### Response:

```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third party UIs and libraries - please see the list at the top of this README.

## Explanation of quantisation methods
<details>
  <summary>Click to see details</summary>

The new methods available are:
* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw

Refer to the Provided Files table below to see what files use which methods, and how.
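As a rough sanity check, the bpw figures above can be tied back to the file sizes in the Provided Files table below. The sketch here assumes roughly 13.0B parameters and ignores that the k-quant files mix types, keeping some tensors at higher precision, so the real files run somewhat larger:

```python
# bits-per-weight -> approximate file size, for a ~13B parameter model
params = 13.0e9  # approximate parameter count of a Llama 2 13B merge

for name, bpw in [("Q2_K", 2.5625), ("Q4_K", 4.5), ("Q6_K", 6.5625)]:
    size_gb = params * bpw / 8 / 1e9  # bits -> bytes -> gigabytes
    print(f"{name}: ~{size_gb:.2f} GB")

# Q6_K: ~10.66 GB, close to the 10.68 GB listed below; lower-bit files
# deviate more because more of their tensors are kept at higher precision.
```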
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q2_K.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q2_K.gguf) | Q2_K | 2 | 5.43 GB| 7.93 GB | smallest, significant quality loss - not recommended for most purposes | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_S.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_S.gguf) | Q3_K_S | 3 | 5.66 GB| 8.16 GB | very small, high quality loss | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_M.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_M.gguf) | Q3_K_M | 3 | 6.34 GB| 8.84 GB | very small, high quality loss | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_L.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q3_K_L.gguf) | Q3_K_L | 3 | 6.93 GB| 9.43 GB | small, substantial quality loss | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_0.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_0.gguf) | Q4_0 | 4 | 7.37 GB| 9.87 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_K_S.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_K_S.gguf) | Q4_K_S | 4 | 7.41 GB| 9.91 GB | small, greater quality loss | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_K_M.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q4_K_M.gguf) | Q4_K_M | 4 | 7.87 GB| 10.37 GB | medium, balanced quality - recommended | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_0.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_0.gguf) | Q5_0 | 5 | 8.97 GB| 11.47 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_K_S.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_K_S.gguf) | Q5_K_S | 5 | 8.97 GB| 11.47 GB | large, low quality loss - recommended | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_K_M.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q5_K_M.gguf) | Q5_K_M | 5 | 9.23 GB| 11.73 GB | large, very low quality loss - recommended | | [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q6_K.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q6_K.gguf) | Q6_K | 6 | 
10.68 GB| 13.18 GB | very large, extremely low quality loss |
| [speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q8_0.gguf](https://huggingface.co/TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF/blob/main/speechless-llama2-hermes-orca-platypus-wizardlm-13b.Q8_0.gguf) | Q8_0 | 8 | 13.83 GB| 16.33 GB | very large, extremely low quality loss - not recommended |

**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.

<!-- README_GGUF.md-provided-files end -->

<!-- README_GGUF.md-how-to-download start -->
## How to download GGUF files

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:
- LM Studio
- LoLLMS Web UI
- Faraday.dev

### In `text-generation-webui`

Under Download Model, you can enter the model repo: TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF and below it, a specific filename to download, such as: speechless-llama2-hermes-orca-platypus-wizardlm-13b.q4_K_M.gguf.

Then click Download.

### On the command line, including multiple files at once

I recommend using the `huggingface-hub` Python library:

```shell
pip3 install "huggingface-hub>=0.17.1"
```

Then you can download any individual model file to the current directory, at high speed, with a command like this:

```shell
huggingface-cli download TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF speechless-llama2-hermes-orca-platypus-wizardlm-13b.q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

<details>
  <summary>More advanced huggingface-cli download usage</summary>

You can also download multiple files at once with a pattern:

```shell
huggingface-cli download TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF speechless-llama2-hermes-orca-platypus-wizardlm-13b.q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows CLI users: Use `set HF_HUB_ENABLE_HF_TRANSFER=1` before running the download command.
</details>
<!-- README_GGUF.md-how-to-download end -->

<!-- README_GGUF.md-how-to-run start -->
## Example `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 32 -m speechless-llama2-hermes-orca-platypus-wizardlm-13b.q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{prompt}\n\n### Response:"
```

Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 4096` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)

## How to run in `text-generation-webui`

Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).

## How to run from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.

### How to load this model from Python using ctransformers

#### First install the package

```bash
# Base ctransformers with no GPU acceleration
pip install "ctransformers>=0.2.24"
# Or with CUDA GPU acceleration
pip install "ctransformers[cuda]>=0.2.24"
# Or with ROCm GPU acceleration
CT_HIPBLAS=1 pip install "ctransformers>=0.2.24" --no-binary ctransformers
# Or with Metal GPU acceleration for macOS systems
CT_METAL=1 pip install "ctransformers>=0.2.24" --no-binary ctransformers
```

#### Simple example code to load one of these GGUF models

```python
from ctransformers import AutoModelForCausalLM

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = AutoModelForCausalLM.from_pretrained("TheBloke/Speechless-Llama2-Hermes-Orca-Platypus-WizardLM-13B-GGUF", model_file="speechless-llama2-hermes-orca-platypus-wizardlm-13b.q4_K_M.gguf", model_type="llama", gpu_layers=50)

print(llm("AI is going to"))
```

## How to use with LangChain

Here are guides on using llama-cpp-python or ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

<!-- README_GGUF.md-how-to-run end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at:

[TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov

Thank you to all my generous patrons and donaters!

And thank you again to a16z for their generous grant.

<!-- footer end -->

<!-- original-model-card start -->
# Original model card: Jiangwen Su's Speechless Llama2 Hermes Orca-Platypus WizardLM 13B

<p><h1> speechless-llama2-hermes-orca-platypus-wizardlm-13b </h1></p>

speechless-llama2-hermes-orca-platypus-wizardlm-13b is a merge of NousResearch/Nous-Hermes-Llama2-13b, Open-Orca/OpenOrca-Platypus2-13B and WizardLM/WizardLM-13B-V1.2.

| Metric | Value |
| --- | --- |
| ARC | |
| HellaSwag | |
| MMLU | |
| TruthfulQA | |
| Average | |

# **Llama 2**

Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 13B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.

## Model Details

*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*

Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.

**Model Developers** Meta

**Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.

**Input** Models input text only.

**Output** Models generate text only.
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.

||Training Data|Params|Content Length|GQA|Tokens|LR|
|---|---|---|---|---|---|---|
|Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>|

*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models (70B) use Grouped-Query Attention (GQA) for improved inference scalability.

**Model Dates** Llama 2 was trained between January 2023 and July 2023.

**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.

**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

**Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](https://arxiv.org/abs/2307.09288)

## Intended Use

**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.

To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).

**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.

## Hardware and Software

**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.

**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta's sustainability program.

||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)|
|---|---|---|---|
|Llama 2 7B|184320|400|31.22|
|Llama 2 13B|368640|400|62.44|
|Llama 2 70B|1720320|400|291.42|
|Total|3311616||539.00|

**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
## Training Data

**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.

**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.

## Evaluation Results

In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.

|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|---|---|---|---|---|---|---|---|---|---|
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|

**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama 1|7B|27.42|23.00|
|Llama 1|13B|41.74|23.08|
|Llama 1|33B|44.19|22.57|
|Llama 1|65B|48.71|21.77|
|Llama 2|7B|33.29|**21.25**|
|Llama 2|13B|41.86|26.10|
|Llama 2|70B|**50.18**|24.60|

**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama-2-Chat|7B|57.04|**0.00**|
|Llama-2-Chat|13B|62.18|**0.00**|
|Llama-2-Chat|70B|**64.14**|0.01|

**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.

## Ethical Considerations and Limitations

Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)| |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)| <!-- original-model-card end -->
28,688
[ [ -0.050567626953125, -0.0552978515625, 0.019561767578125, 0.0203704833984375, -0.0271148681640625, -0.010223388671875, 0.004962921142578125, -0.05511474609375, 0.034149169921875, 0.01666259765625, -0.052581787109375, -0.035369873046875, -0.03564453125, 0.0108...
nickprock/bert-italian-finetuned-ner
2023-09-12T07:25:47.000Z
[ "transformers", "pytorch", "safetensors", "bert", "token-classification", "generated_from_trainer", "it", "dataset:tner/wikiann", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
nickprock
null
null
nickprock/bert-italian-finetuned-ner
6
655
transformers
2023-03-31T20:12:09
---
language:
- it
license: mit
tags:
- generated_from_trainer
datasets:
- tner/wikiann
metrics:
- precision
- recall
- f1
- accuracy
widget:
- text: 'Ciao, sono Giacomo. Vivo a Milano e lavoro da Armani. '
  example_title: Example 1
- text: 'Domenica andrò allo stadio con Giovanna a guardare la Fiorentina. '
  example_title: Example 2
base_model: dbmdz/bert-base-italian-cased
model-index:
- name: bert-italian-finetuned-ner
  results:
  - task:
      type: token-classification
      name: Token Classification
    dataset:
      name: wiki_neural
      type: wiki_neural
      config: it
      split: validation
      args: it
    metrics:
    - type: precision
      value: 0.9438064759036144
      name: Precision
    - type: recall
      value: 0.954225352112676
      name: Recall
    - type: f1
      value: 0.9489873178118493
      name: F1
    - type: accuracy
      value: 0.9917883014379933
      name: Accuracy
---

# bert-italian-finetuned-ner

This model is a fine-tuned version of [dbmdz/bert-base-italian-cased](https://huggingface.co/dbmdz/bert-base-italian-cased) on the wiki_neural dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0361
- Precision: 0.9438
- Recall: 0.9542
- F1: 0.9490
- Accuracy: 0.9918

## Model description

A token classification (NER) experiment for the Italian language.

### Example

```python
from transformers import pipeline

ner_pipeline = pipeline("ner", model="nickprock/bert-italian-finetuned-ner", aggregation_strategy="simple")
text = "La sede storica della Olivetti è ad Ivrea"

output = ner_pipeline(text)
```

## Intended uses & limitations

The model can be used for token classification, in particular NER. It is fine-tuned for the Italian language.

## Training and evaluation data

The dataset used is [wikiann](https://huggingface.co/datasets/tner/wikiann)

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a `TrainingArguments` sketch of these settings follows at the end of this card):
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step  | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0297        | 1.0   | 11050 | 0.0323          | 0.9324    | 0.9420 | 0.9372 | 0.9908   |
| 0.0173        | 2.0   | 22100 | 0.0324          | 0.9445    | 0.9514 | 0.9479 | 0.9915   |
| 0.0057        | 3.0   | 33150 | 0.0361          | 0.9438    | 0.9542 | 0.9490 | 0.9918   |

### Framework versions

- Transformers 4.27.3
- Pytorch 1.13.0
- Datasets 2.1.0
- Tokenizers 0.13.2
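The hyperparameters listed above translate into `transformers` `TrainingArguments` roughly as follows. This is a sketch: the original training script is not included in the card, and the output directory name here is made up.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the listed hyperparameters; pass this to a
# token-classification Trainer together with the model, dataset, and collator.
training_args = TrainingArguments(
    output_dir="bert-italian-finetuned-ner",  # made-up name
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```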
2,900
[ [ -0.041412353515625, -0.0399169921875, -0.0007529258728027344, 0.0021152496337890625, -0.017364501953125, -0.03582763671875, -0.0204315185546875, -0.0208740234375, 0.0285797119140625, 0.0185394287109375, -0.045806884765625, -0.04595947265625, -0.047637939453125, ...
shubhangkhare/stable-diffusion-xl-base-1.0-v1
2023-10-07T07:46:54.000Z
[ "diffusers", "text-to-image", "autotrain", "has_space", "region:us" ]
text-to-image
shubhangkhare
null
null
shubhangkhare/stable-diffusion-xl-base-1.0-v1
1
655
diffusers
2023-10-07T07:46:48
---
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: photo of Shubhang Khare
tags:
- text-to-image
- diffusers
- autotrain
inference: true
---

# DreamBooth trained by AutoTrain

Text encoder was not trained.
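The card gives no usage snippet. A minimal `diffusers` inference sketch follows; it assumes the repo can be loaded as a full SDXL pipeline (if AutoTrain instead saved LoRA weights here, load the base model and attach this repo with `load_lora_weights`, as noted in the comments):

```python
import torch
from diffusers import DiffusionPipeline

# Assumption: the repo holds full pipeline weights. If it only holds LoRA
# weights, load "stabilityai/stable-diffusion-xl-base-1.0" instead and then
# call pipe.load_lora_weights("shubhangkhare/stable-diffusion-xl-base-1.0-v1").
pipe = DiffusionPipeline.from_pretrained(
    "shubhangkhare/stable-diffusion-xl-base-1.0-v1",
    torch_dtype=torch.float16,
).to("cuda")

# Reuse the instance prompt the model was trained on
image = pipe(prompt="photo of Shubhang Khare").images[0]
image.save("example.png")
```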
236
[ [ 0.004848480224609375, -0.011810302734375, 0.0156097412109375, 0.0089569091796875, -0.036346435546875, 0.06683349609375, 0.01294708251953125, -0.013519287109375, 0.035552978515625, -0.00022685527801513672, -0.03582763671875, -0.002941131591796875, -0.059753417968...
lberglund/sweep_full_0_20231012104517
2023-10-12T11:09:57.000Z
[ "diffusers", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "lora", "license:openrail++", "has_space", "region:us" ]
text-to-image
lberglund
null
null
lberglund/sweep_full_0_20231012104517
1
655
diffusers
2023-10-12T10:45:21
---
license: openrail++
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: "a photo of a person showing <thumbs_up> thumbs up"
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
- lora
inference: true
---

# LoRA DreamBooth - lberglund/sweep_full_0_20231012104517

These are LoRA adaptation weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were trained on "a photo of a person showing `<thumbs_up>` thumbs up" using [DreamBooth](https://dreambooth.github.io/). Example images can be found below.

LoRA for the text encoder was enabled: False.

Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
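As with most SDXL LoRA repos, the weights would typically be used by loading them on top of the base model. The sketch below is an assumption, not an official snippet: it presumes the repo stores standard `diffusers`-format LoRA weights and swaps in the fp16-fix VAE mentioned above.

```python
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

# Base model plus the fp16-fix VAE the card says was used during training
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16
).to("cuda")

# Attach the LoRA adaptation weights from this repo (assumed diffusers format)
pipe.load_lora_weights("lberglund/sweep_full_0_20231012104517")

image = pipe("a photo of a person showing <thumbs_up> thumbs up").images[0]
image.save("thumbs_up.png")
```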
699
[ [ -0.024566650390625, -0.029815673828125, 0.0249176025390625, 0.006069183349609375, -0.0352783203125, 0.007305145263671875, 0.02593994140625, -0.022918701171875, 0.082275390625, 0.04144287109375, -0.037384033203125, -0.0251007080078125, -0.050933837890625, -0....