Columns:
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 distinct values)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
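Below is a minimal sketch of loading and querying rows with this schema via the `datasets` library. The dataset id used here is an assumption for illustration only; substitute the actual Hub repository or local files.

```python
# Hypothetical example: load a Hub dataset with the columns listed above and
# pick out the most-downloaded conversational models.
from datasets import load_dataset

ds = load_dataset("librarian-bots/model_cards_with_metadata", split="train")  # assumed dataset id

conversational = ds.filter(lambda row: row["pipeline_tag"] == "conversational")
top = sorted(conversational, key=lambda row: row["downloads"], reverse=True)[:5]
for row in top:
    print(row["modelId"], row["downloads"])
```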
ClaudeCOULOMBE/RickBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en thumbnail: https://avatars3.githubusercontent.com/u/32437151?s=460&u=4ec59abc8d21d5feea3dab323d23a5860e6996a4&v=4 tags: - text-classification - go-emotion - pytorch license: apache-2.0 datasets: - go_emotions metrics: - Accuracy --- # Bert-Base-Uncased-Go-Emotion ## Model description: ## Training Parameters: ``` Num examples = 169208 Num Epochs = 3 Instantaneous batch size per device = 16 Total train batch size (w. parallel, distributed & accumulation) = 16 Gradient Accumulation steps = 1 Total optimization steps = 31728 ``` ## TrainOutput: ``` 'train_loss': 0.12085497042373672, ``` ## Evaluation Output: ``` 'eval_accuracy_thresh': 0.9614765048027039, 'eval_loss': 0.1164659634232521 ``` ## Colab Notebook: [Notebook](https://github.com/bhadreshpsavani/UnderstandingNLP/blob/master/go_emotion_of_transformers_multilabel_text_classification_v2.ipynb)
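The card above stops short of a usage example. A minimal inference sketch follows, assuming the checkpoint is published under the (hypothetical) repo id used below and that the classifier is multi-label (sigmoid head over the GoEmotions labels, matching the `eval_accuracy_thresh` metric):

```python
# Multi-label GoEmotions inference sketch; the repo id is an assumption, not taken from the card.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="bhadresh-savani/bert-base-uncased-go-emotion",  # hypothetical checkpoint id
    top_k=None,  # return scores for every emotion label (multi-label head)
)
scores = classifier(["I am so happy you came to the party!"])[0]
print(sorted(scores, key=lambda s: s["score"], reverse=True)[:3])  # top three emotions
```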
Connor-tech/bert_cn_finetuning
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: zh --- ## roberta-chinese-homie-large This is a Chinese RoBERTa model pre-trained on a large-scale corpus. It is suitable for fine-tuning on specific downstream tasks, or as a parameter initialization for further pre-training, which can improve performance. Because of heavy tuning ("alchemy"), it is not suitable for direct fill-mask use unless you first perform a small amount of additional pre-training. I don't know what "homie" means, but people call me that and I can feel the meaning; I find it an interesting bit of natural language. ## Bibtex entry and citation info Please cite if you find it helpful. ``` @article{zhu2023metaaid, title={MetaAID 2.0: An Extensible Framework for Developing Metaverse Applications via Human-controllable Pre-trained Models}, author={Zhu, Hongyin}, journal={arXiv preprint arXiv:2302.13173}, year={2023} } ``` --- license: other ---
Contrastive-Tension/BERT-Base-NLI-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - image_folder metrics: - accuracy model-index: - name: Har_Finetuned-ViT-Hybrid_ results: - task: name: Image Classification type: image-classification dataset: name: image_folder type: image_folder config: har split: train args: har metrics: - name: Accuracy type: accuracy value: 0.8984126984126984 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Har_Finetuned-ViT-Hybrid_ This model is a fine-tuned version of [google/vit-hybrid-base-bit-384](https://huggingface.co/google/vit-hybrid-base-bit-384) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.3252 - Accuracy: 0.8984 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7647 | 1.0 | 167 | 0.4917 | 0.8550 | | 0.6376 | 2.0 | 334 | 0.3634 | 0.8820 | | 0.5051 | 3.0 | 501 | 0.3346 | 0.8915 | | 0.3472 | 4.0 | 668 | 0.3280 | 0.8979 | | 0.2982 | 5.0 | 835 | 0.3252 | 0.8984 | | 0.319 | 6.0 | 1002 | 0.3216 | 0.8979 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
Contrastive-Tension/BERT-Base-Swe-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
126
null
--- language: [bn, hi, ka, ma, mr, ta, ur, en] license: afl-3.0 --- This model is used for detecting **abusive speech** in **Bengali, Devanagari Hindi, Code-mixed Hindi, Code-mixed Kannada, Code-mixed Malayalam, Marathi, Code-mixed Tamil, Urdu, Code-mixed Urdu, and English**. The "allInOne" in the name refers to joint (cross-lingual) training, where the model is trained on data from all of these languages. It is fine-tuned from the MuRIL model with a learning rate of 2e-5. Training code can be found at this [url](https://github.com/hate-alert/IndicAbusive) LABEL_0 :-> Normal LABEL_1 :-> Abusive ### For more details about our paper Mithun Das, Somnath Banerjee and Animesh Mukherjee. "[Data Bootstrapping Approaches to Improve Low Resource Abusive Language Detection for Indic Languages](https://arxiv.org/abs/2204.12543)". Accepted at ACM HT 2022. ***Please cite our paper in any published work that uses any of these resources.*** ~~~ @article{das2022data, title={Data Bootstrapping Approaches to Improve Low Resource Abusive Language Detection for Indic Languages}, author={Das, Mithun and Banerjee, Somnath and Mukherjee, Animesh}, journal={arXiv preprint arXiv:2204.12543}, year={2022} } ~~~
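A minimal inference sketch for the card above; the checkpoint id is a placeholder assumption (the card only links the training repository), so substitute the repo this card actually belongs to:

```python
# Binary abusive-language classification: LABEL_0 -> Normal, LABEL_1 -> Abusive.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "Hate-speech-CNERG/indic-abusive-allInOne-MuRIL"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("This is an example sentence.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print({0: "Normal", 1: "Abusive"}[int(logits.argmax(dim=-1))])
```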
Coolhand/Sentiment
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- ```python from datasets import load_dataset dataset = load_dataset("amazon_us_reviews") ```
Cryptikdw/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: byt5-small-wikipron-eng-latn-uk-broad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # byt5-small-wikipron-eng-latn-uk-broad This model is a fine-tuned version of [google/byt5-small](https://huggingface.co/google/byt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1572 - Per: 0.2426 - Gen Len: 15.8324 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 128 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Per | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:| | 1.8915 | 1.0 | 393 | 0.2827 | 0.3507 | 15.8408 | | 0.2887 | 2.0 | 786 | 0.2034 | 0.2885 | 15.8807 | | 0.2216 | 3.0 | 1179 | 0.1804 | 0.263 | 15.8084 | | 0.1934 | 4.0 | 1572 | 0.1683 | 0.2543 | 15.8182 | | 0.1772 | 5.0 | 1965 | 0.1663 | 0.2499 | 15.8002 | | 0.1656 | 6.0 | 2358 | 0.1605 | 0.2475 | 15.7992 | | 0.1566 | 7.0 | 2751 | 0.1588 | 0.2457 | 15.8289 | | 0.1509 | 8.0 | 3144 | 0.1579 | 0.2427 | 15.8337 | | 0.1459 | 9.0 | 3537 | 0.1562 | 0.243 | 15.8391 | | 0.1424 | 10.0 | 3930 | 0.1572 | 0.2426 | 15.8324 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
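The card above omits a usage section. Here is a sketch of grapheme-to-phoneme inference with the fine-tuned ByT5 checkpoint; the repo id and expected output are assumptions for illustration:

```python
# Hypothetical usage: English grapheme-to-phoneme (broad UK transcription) with ByT5.
from transformers import pipeline

g2p = pipeline("text2text-generation", model="your-username/byt5-small-wikipron-eng-latn-uk-broad")  # assumed id
print(g2p("transformer", max_length=32)[0]["generated_text"])  # expected: an IPA-like phoneme string
```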
Cthyllax/DialoGPT-medium-PaladinDanse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: cc-by-nc-4.0 tags: - generated_from_trainer - instruction fine-tuning model-index: - name: flan-t5-small-distil-v2 results: [] language: - en pipeline_tag: text2text-generation widget: - text: >- how can I become more healthy? example_title: example --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini.png" alt="Title" style="width: 100%; min-width: 300px; display: block; margin: auto;"></a> </p> # LaMini-Flan-T5-77M [![Model License](https://img.shields.io/badge/Model%20License-CC%20By%20NC%204.0-red.svg)]() This model is one of our LaMini-LM model series in paper "[LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions](https://github.com/mbzuai-nlp/lamini-lm)". This model is a fine-tuned version of [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) on [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction) that contains 2.58M samples for instruction fine-tuning. For more information about our dataset, please refer to our [project repository](https://github.com/mbzuai-nlp/lamini-lm/). You can view other models of LaMini-LM series as follows. Models with ✩ are those with the best overall performance given their size/architecture, hence we recommend using them. More details can be seen in our paper. <table> <thead> <tr> <th>Base model</th> <th colspan="4">LaMini-LM series (#parameters)</th> </tr> </thead> <tbody> <tr> <td>T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-61m" target="_blank" rel="noopener noreferrer">LaMini-T5-61M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-223m" target="_blank" rel="noopener noreferrer">LaMini-T5-223M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-738m" target="_blank" rel="noopener noreferrer">LaMini-T5-738M</a></td> <td></td> </tr> <tr> <td>Flan-T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-77m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-77M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-248m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-248M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-783m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-783M</a>✩</td> <td></td> </tr> <tr> <td>Cerebras-GPT</td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-111m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-111M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-256m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-256M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-590m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-590M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-1.3B</a></td> </tr> <tr> <td>GPT-2</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-124m" target="_blank" rel="noopener noreferrer">LaMini-GPT-124M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-774m" target="_blank" rel="noopener noreferrer">LaMini-GPT-774M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-1.5b" target="_blank" rel="noopener noreferrer">LaMini-GPT-1.5B</a>✩</td> <td></td> </tr> <tr> <td>GPT-Neo</td> <td><a 
href="https://huggingface.co/MBZUAI/lamini-neo-125m" target="_blank" rel="noopener noreferrer">LaMini-Neo-125M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-neo-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Neo-1.3B</a></td> <td></td> <td></td> </tr> <tr> <td>GPT-J</td> <td colspan="4">coming soon</td> </tr> <tr> <td>LLaMA</td> <td colspan="4">coming soon</td> </tr> </tbody> </table> ## Use ### Intended use We recommend using the model to response to human instructions written in natural language. We now show you how to load and use our model using HuggingFace `pipeline()`. ```python # pip install -q transformers from transformers import pipeline checkpoint = "{model_name}" model = pipeline('text2text-generation', model = checkpoint) input_prompt = 'Please let me know your thoughts on the given place and why you think it deserves to be visited: \n"Barcelona, Spain"' generated_text = model(input_prompt, max_length=512, do_sample=True)[0]['generated_text'] print("Response", generated_text) ``` ## Training Procedure <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini-pipeline.drawio.png" alt="Title" style="width: 100%; min-width: 250px; display: block; margin: auto;"></a> </p> We initialize with [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) and fine-tune it on our [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction). Its total number of parameters is 77M. ### Training Hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ## Evaluation We conducted two sets of evaluations: automatic evaluation on downstream NLP tasks and human evaluation on user-oriented instructions. For more detail, please refer to our [paper](). ## Limitations More information needed # Citation ```bibtex @article{lamini-lm, author = {Minghao Wu and Abdul Waheed and Chiyu Zhang and Muhammad Abdul-Mageed and Alham Fikri Aji }, title = {LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions}, journal = {CoRR}, volume = {abs/2304.14402}, year = {2023}, url = {https://arxiv.org/abs/2304.14402}, eprinttype = {arXiv}, eprint = {2304.14402} } ```
Culmenus/IceBERT-finetuned-ner
[ "pytorch", "tensorboard", "roberta", "token-classification", "dataset:mim_gold_ner", "transformers", "generated_from_trainer", "license:gpl-3.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_keras_callback model-index: - name: mlproject-bert-ten results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # mlproject-bert-ten This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 8.4331 - Validation Loss: 8.6730 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 1e-04, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 8.4331 | 8.6730 | 0 | ### Framework versions - Transformers 4.28.0.dev0 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
Culmenus/opus-mt-de-is-finetuned-de-to-is
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### EANOVNTY-0409-s2900 2900 steps w/ learning rate 1e-6
CurtisBowser/DialoGPT-medium-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1519396625008967682/m1_tTQau_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Geoff Lewis</div> <div style="text-align: center; font-size: 14px;">@geofflewisorg</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Geoff Lewis. | Data | Geoff Lewis | | --- | --- | | Tweets downloaded | 3232 | | Retweets | 586 | | Short tweets | 495 | | Tweets kept | 2151 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/wfciz1uf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @geofflewisorg's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/wovmsbpw) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/wovmsbpw/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/geofflewisorg') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CurtisBowser/DialoGPT-small-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1158573621654315019/g2iO0mSU_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Joe Vargas</div> <div style="text-align: center; font-size: 14px;">@angryjoeshow</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Joe Vargas. | Data | Joe Vargas | | --- | --- | | Tweets downloaded | 3236 | | Retweets | 645 | | Short tweets | 187 | | Tweets kept | 2404 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/ke2l3q5y/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @angryjoeshow's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/kz06i6e5) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/kz06i6e5/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/angryjoeshow') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
D-Keqi/espnet_asr_train_asr_streaming_transformer_raw_en_bpe500_sp_valid.acc.ave
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-04-10T08:45:56Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - chest-xray-classification model-index: - name: vit-base-patch16-224-chest-x-ray results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-chest-x-ray This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the chest-xray-classification dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
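No usage example is given in the card above; a minimal sketch follows, where the repo id and image path are placeholders:

```python
# Hypothetical usage of the fine-tuned ViT chest X-ray classifier.
from transformers import pipeline

clf = pipeline("image-classification", model="your-username/vit-base-patch16-224-chest-x-ray")  # assumed id
print(clf("example_chest_xray.png"))  # list of {label, score} predictions for the image
```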
D3vil/DialoGPT-smaall-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - WilliamWen/autotrain-data-summarization_02 co2_eq_emissions: emissions: 6.080555626511939 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 48234117386 - CO2 Emissions (in grams): 6.0806 ## Validation Metrics - Loss: 2.396 - Rouge1: 37.261 - Rouge2: 10.823 - RougeL: 20.762 - RougeLsum: 32.576 - Gen Len: 141.653 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/WilliamWen/autotrain-summarization_02-48234117386 ```
D3xter1922/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-10T08:46:52Z
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - WilliamWen/autotrain-data-summarization_02 co2_eq_emissions: emissions: 0.026771953091899572 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 48234117390 - CO2 Emissions (in grams): 0.0268 ## Validation Metrics - Loss: 2.393 - Rouge1: 35.956 - Rouge2: 10.267 - RougeL: 20.252 - RougeLsum: 31.425 - Gen Len: 141.983 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/WilliamWen/autotrain-summarization_02-48234117390 ```
DSI/personal_sentiment
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="climbingsilver/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
alexandrainst/da-emotion-classification-base
[ "pytorch", "tf", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
837
null
--- tags: - generated_from_keras_callback model-index: - name: TSo-cross-en-de-roberta-sentence-transformer-longformer-base-16384 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # TSo-cross-en-de-roberta-sentence-transformer-longformer-base-16384 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Training results ### Framework versions - Transformers 4.27.4 - TensorFlow 2.11.0 - Datasets 2.1.0 - Tokenizers 0.13.2
alexandrainst/da-hatespeech-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
866
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1010.09 +/- 189.32 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
DannyMichael/ECU911
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Movie Recommendation System🚀 A movie recommender that recommends five movies based on an input movie. It uses a KNN model that works on the concept of text vectorization. ## Screenshots ![](https://github.com/VividhPandey003/movie_recommendation_system/blob/main/images/1.png) --- ![](https://github.com/VividhPandey003/movie_recommendation_system/blob/main/images/2.png) --- ![](https://github.com/VividhPandey003/movie_recommendation_system/blob/main/images/3.png) --- ![](https://github.com/VividhPandey003/movie_recommendation_system/blob/main/images/4.png) ### Try it on your own 💻 [Movie Recommendation System Deployment](https://huggingface.co/spaces/coder003/movie-recommender)
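A minimal sketch of the idea described above, with toy data: vectorize short text descriptions of movies and use a nearest-neighbour index to return the five most similar titles. The real project builds its vectors from an actual movie dataset; everything below is illustrative.

```python
# KNN recommender over text vectors (toy example).
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import NearestNeighbors

movies = {
    "Inception": "dream heist sci-fi thriller nolan",
    "Interstellar": "space time travel sci-fi nolan",
    "The Dark Knight": "batman crime thriller nolan",
    "Toy Story": "animated toys friendship pixar",
    "Finding Nemo": "animated ocean fish pixar",
    "Gravity": "space survival thriller",
}
titles = list(movies)
vectors = CountVectorizer().fit_transform(movies.values())

knn = NearestNeighbors(n_neighbors=6, metric="cosine").fit(vectors)
_, idx = knn.kneighbors(vectors[titles.index("Inception")])
print([titles[i] for i in idx[0][1:]])  # five nearest titles, excluding the query itself
```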
DarkKibble/DialoGPT-medium-Tankman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
A Chinese LLaMA model obtained by adding a Chinese vocabulary and continuing pre-training of the Chinese embeddings. For details, see: https://github.com/ymcui/Chinese-LLaMA-Alpaca
DataikuNLP/paraphrase-albert-small-v2
[ "pytorch", "albert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "AlbertModel" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
628
null
--- title: chinese-alpaca-7b-merged emoji: 📚 colorFrom: gray colorTo: red sdk: gradio sdk_version: 3.23.0 app_file: app.py pinned: false --- A Chinese LLaMA (Alpaca) model obtained by adding a Chinese vocabulary, continuing pre-training of the Chinese embeddings, and then fine-tuning on an instruction dataset. For details, see: https://github.com/ymcui/Chinese-LLaMA-Alpaca ### Usage 1. Install the required packages ```bash pip install sentencepiece pip install transformers>=4.28.0 ``` 2. Generate text ```python import torch import transformers from transformers import LlamaTokenizer, LlamaForCausalLM def generate_prompt(text): return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {text} ### Response:""" tokenizer = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-7b-merged') model = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-7b-merged').half().to('cuda') model.eval() text = '第一个登上月球的人是谁?' prompt = generate_prompt(text) input_ids = tokenizer.encode(prompt, return_tensors='pt').to('cuda') with torch.no_grad(): output_ids = model.generate( input_ids=input_ids, max_new_tokens=128, temperature=1, top_k=40, top_p=0.9, repetition_penalty=1.15 ).cuda() output = tokenizer.decode(output_ids[0], skip_special_tokens=True) print(output.replace(prompt, '').strip()) ```
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,517
null
--- title: chinese-alpaca-13b-merged emoji: 📚 colorFrom: gray colorTo: red sdk: gradio sdk_version: 3.23.0 app_file: app.py pinned: false --- A Chinese LLaMA (Alpaca) model obtained by adding a Chinese vocabulary, continuing pre-training of the Chinese embeddings, and then fine-tuning on an instruction dataset. For details, see: https://github.com/ymcui/Chinese-LLaMA-Alpaca ### Usage 1. Install the required packages ```bash pip install sentencepiece pip install transformers>=4.28.0 ``` 2. Generate text ```python import torch import transformers from transformers import LlamaTokenizer, LlamaForCausalLM def generate_prompt(text): return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {text} ### Response:""" tokenizer = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-13b-merged') model = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-13b-merged').half().to('cuda') model.eval() text = '第一个登上月球的人是谁?' prompt = generate_prompt(text) input_ids = tokenizer.encode(prompt, return_tensors='pt').to('cuda') with torch.no_grad(): output_ids = model.generate( input_ids=input_ids, max_new_tokens=128, temperature=1, top_k=40, top_p=0.9, repetition_penalty=1.15 ).cuda() output = tokenizer.decode(output_ids[0], skip_special_tokens=True) print(output.replace(prompt, '').strip()) ```
Dave/twomad-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: roberta-finetuned-solvencia-v1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-finetuned-solvencia-v1 This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4141 - Accuracy: 0.8919 - F1: 0.8919 - Precision: 0.8919 - Recall: 0.8919 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | No log | 1.0 | 333 | 0.3781 | 0.8544 | 0.8544 | 0.8544 | 0.8544 | | 0.4429 | 2.0 | 666 | 0.3295 | 0.8679 | 0.8679 | 0.8679 | 0.8679 | | 0.4429 | 3.0 | 999 | 0.3664 | 0.8784 | 0.8784 | 0.8784 | 0.8784 | | 0.3512 | 4.0 | 1332 | 0.4602 | 0.8649 | 0.8649 | 0.8649 | 0.8649 | | 0.2975 | 5.0 | 1665 | 0.4721 | 0.8889 | 0.8889 | 0.8889 | 0.8889 | | 0.2975 | 6.0 | 1998 | 0.4141 | 0.8919 | 0.8919 | 0.8919 | 0.8919 | | 0.2499 | 7.0 | 2331 | 0.4054 | 0.8889 | 0.8889 | 0.8889 | 0.8889 | | 0.2132 | 8.0 | 2664 | 0.4878 | 0.8829 | 0.8829 | 0.8829 | 0.8829 | | 0.2132 | 9.0 | 2997 | 0.4867 | 0.8904 | 0.8904 | 0.8904 | 0.8904 | | 0.1812 | 10.0 | 3330 | 0.5339 | 0.8889 | 0.8889 | 0.8889 | 0.8889 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.9.1 - Datasets 1.18.4 - Tokenizers 0.11.6
Davlan/m2m100_418M-yor-eng-mt
[ "pytorch", "m2m_100", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "M2M100ForConditionalGeneration" ], "model_type": "m2m_100", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: finetuning_emotion_model results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.9345 - name: F1 type: f1 value: 0.9347733687582148 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning_emotion_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1539 - Accuracy: 0.9345 - F1: 0.9348 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 250 | 0.1741 | 0.93 | 0.9297 | | 0.1453 | 2.0 | 500 | 0.1539 | 0.9345 | 0.9348 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Davlan/mbart50-large-eng-yor-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
**Converted model for GPTQ from https://huggingface.co/lmsys/vicuna-13b-delta-v0. This is the best local model I've ever tried. I hope someone makes a version based on the uncensored dataset...** GPTQ conversion command (on the CUDA branch): ``` CUDA_VISIBLE_DEVICES=0 python llama.py ../lmsys/vicuna-13b-v0 c4 --wbits 4 --true-sequential --groupsize 128 --save vicuna-13b-4bit-128g.pt ``` Added 1 token to the tokenizer model: ``` python llama-tools/add_tokens.py lmsys/vicuna-13b-v0/tokenizer.model /content/tokenizer.model llama-tools/test_list.txt ``` Use it in Oobabooga with these flags: `--wbits 4 --groupsize 128` Enjoy
Davlan/xlm-roberta-base-finetuned-hausa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
null
--- tags: - generated_from_keras_callback model-index: - name: pretrained-bert-example results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # pretrained-bert-example This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 8.5696 - Validation Loss: 9.4093 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 1e-04, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 8.5696 | 9.4093 | 0 | ### Framework versions - Transformers 4.24.0 - TensorFlow 2.9.1 - Datasets 2.4.0 - Tokenizers 0.11.0
Davlan/xlm-roberta-base-finetuned-naija
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
Access to model ohmyhong/SejongAlpaca-polyglot-lora is restricted and you are not in the authorized list. Visit https://huggingface.co/ohmyhong/SejongAlpaca-polyglot-lora to ask for access.
Davlan/xlm-roberta-base-finetuned-somali
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: openrail++ language: - en tags: - art - diffusers - stable diffusion - controlnet --- Here's the first version of ControlNet for Stable Diffusion 2.1 for diffusers, trained on a subset of laion/laion-art. ### Lineart v2: ![<color> 0](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_lineart.png) ### Misuse, Malicious Use, and Out-of-Scope Use The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. Thanks - https://huggingface.co/lllyasviel/ControlNet for the implementation and the release of 1.5 models. - https://huggingface.co/thepowefuldeez for the conversion script to diffusers
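A minimal usage sketch with 🤗 Diffusers follows. The base-model id, the direct loading of this repo with `ControlNetModel.from_pretrained`, and the local `lineart.png` conditioning file are assumptions — the repo holds several checkpoints, so check its file layout before running.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Assumption: the lineart checkpoint can be loaded straight from this repo;
# if the repo stores several checkpoints, point at the right file/subfolder instead.
controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-sd21", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",  # assumed SD 2.1 base checkpoint
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# The conditioning image must already be a lineart map (hypothetical local file).
control = load_image("lineart.png")
image = pipe("a cozy cabin in a snowy forest", image=control, num_inference_steps=30).images[0]
image.save("controlnet_out.png")
```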
Davlan/xlm-roberta-base-finetuned-swahili
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
40
null
--- license: openrail++ language: - en tags: - art - diffusers - stable diffusion - controlnet --- Here's the first version of ControlNet for Stable Diffusion 2.1 for diffusers, trained on a subset of laion/laion-art. ### ADE 20K: ![<color> 0](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_ade20k.png) ### Misuse, Malicious Use, and Out-of-Scope Use The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. Thanks - https://huggingface.co/lllyasviel/ControlNet for the implementation and the release of 1.5 models. - https://huggingface.co/thepowefuldeez for the conversion script to diffusers
Davlan/xlm-roberta-base-finetuned-wolof
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m --- https://civitai.com/models/33451/magisa-granblue-fantasy
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: creativeml-openrail-m --- https://civitai.com/models/34579/arknights-greythroat
Davlan/xlm-roberta-base-masakhaner
[ "pytorch", "xlm-roberta", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m --- https://civitai.com/models/35007/girls-frontline-zb26
Davlan/xlm-roberta-base-sadilar-ner
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: creativeml-openrail-m --- https://civitai.com/models/13157/shimakaze-kancolle-character
Davlan/xlm-roberta-large-masakhaner
[ "pytorch", "tf", "xlm-roberta", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,449
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 - accuracy model-index: - name: ptsdvsN results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ptsdvsN This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0050 - F1: 0.8051 - Roc Auc: 0.8042 - Accuracy: 0.8051 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Roc Auc | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|:--------:| | 0.4907 | 1.0 | 875 | 0.4679 | 0.8161 | 0.8164 | 0.8161 | | 0.3568 | 2.0 | 1750 | 0.4654 | 0.8221 | 0.8225 | 0.8221 | | 0.2289 | 3.0 | 2625 | 0.7412 | 0.7843 | 0.7800 | 0.7843 | | 0.1246 | 4.0 | 3500 | 0.8720 | 0.8013 | 0.7995 | 0.8013 | | 0.0656 | 5.0 | 4375 | 1.0050 | 0.8051 | 0.8042 | 0.8051 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
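Since the card has no usage snippet, a hedged inference sketch follows; the repo id `your-username/ptsdvsN` is a placeholder, and the single-label softmax head is an assumption.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id -- substitute the actual Hub path of this checkpoint.
model_id = "your-username/ptsdvsN"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Text to classify.", return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = torch.softmax(model(**inputs).logits, dim=-1)  # assumes a single-label head
print({model.config.id2label[i]: p.item() for i, p in enumerate(probs[0])})
```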
Dawit/DialogGPT-small-ironman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: creativeml-openrail-m --- https://civitai.com/models/28503/saratoga-r
Declan/Breitbart_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - autotrain - text-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - ekincanozcelik/autotrain-data-okr_iptal_v4 co2_eq_emissions: emissions: 0.6003148876836777 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 48282117445 - CO2 Emissions (in grams): 0.6003 ## Validation Metrics - Loss: 0.259 - Accuracy: 0.907 - Precision: 0.901 - Recall: 0.932 - AUC: 0.959 - F1: 0.916 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/ekincanozcelik/autotrain-okr_iptal_v4-48282117445 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("ekincanozcelik/autotrain-okr_iptal_v4-48282117445", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("ekincanozcelik/autotrain-okr_iptal_v4-48282117445", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Declan/CNN_model_v7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: imagefolder metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-bubble-new_data-256 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `imagefolder` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 2 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/Yagorka/ddpm-bubble-new_data-256/tensorboard?#scalars)
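In the spirit of the TODO above, a minimal sampling sketch might look like the following; the repo id is taken from the TensorBoard link in this card, and unconditional DDPM sampling is assumed.

```python
from diffusers import DDPMPipeline

# Repo id taken from the TensorBoard link above; adjust if the model was moved.
pipeline = DDPMPipeline.from_pretrained("Yagorka/ddpm-bubble-new_data-256")

# Draw one unconditional 256x256 sample (1000 denoising steps is the DDPM default).
image = pipeline(num_inference_steps=1000).images[0]
image.save("ddpm_bubble_sample.png")
```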
Declan/ChicagoTribune_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="vldnechai/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Declan/FoxNews_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-all This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1712 - F1: 0.8533 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.3115 | 1.0 | 626 | 0.1825 | 0.8168 | | 0.1557 | 2.0 | 1252 | 0.1693 | 0.8429 | | 0.1025 | 3.0 | 1878 | 0.1712 | 0.8533 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Declan/FoxNews_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: cnn_news_summary_reduced results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cnn_news_summary_reduced This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the cnn_dailymail dataset. It achieves the following results on the evaluation set: - eval_loss: 1.7174 - eval_rouge1: 0.2449 - eval_rouge2: 0.1201 - eval_rougeL: 0.2041 - eval_rougeLsum: 0.2043 - eval_gen_len: 19.0 - eval_runtime: 168.435 - eval_samples_per_second: 15.876 - eval_steps_per_second: 0.997 - epoch: 1.0 - step: 669 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
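A hedged inference sketch for this t5-small fine-tune is shown below; the repo id is a placeholder, and the standard `"summarize: "` T5 prefix is assumed.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Placeholder repo id -- replace with the actual Hub path of this checkpoint.
model_id = "your-username/cnn_news_summary_reduced"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

article = "CNN-style news article text goes here ..."
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_new_tokens=60, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```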
Declan/HuffPost_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-04-10T13:34:44Z
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: justinsiow/ppo-SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/HuffPost_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit language: - en library_name: transformers pipeline_tag: text-generation tags: - rlhf - stable diffusion --- # GPT2-Large For Writing Stable Diffusion Prompts This is a GPT2-Large model fine-tuned for rewriting prompts for Stable Diffusion v1.5 through RLHF. The model takes an image description and writes a prompt resulting in aesthetically pleasing images. ## How To Use the Model Write an image description and add the special SEP token `</s>`; the model writes the prompt after it. Example: ``` a cat</s> ```
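Concretely, generation could look like the sketch below; the repo id is a placeholder and the sampling settings are illustrative only.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder repo id -- substitute the actual Hub path of this GPT2-Large checkpoint.
model_id = "your-username/gpt2-large-sd-prompts"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("a cat</s>", return_tensors="pt")  # image description + SEP token
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0]))  # the Stable Diffusion prompt follows the SEP token
```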
Declan/NPR_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: job_classification results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.8208954930305481 --- # job_classification Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### doctor ![doctor](images/doctor.jpg) #### engineer ![engineer](images/engineer.jpg) #### lawyer ![lawyer](images/lawyer.jpg)
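A hedged inference sketch follows; the repo id is a placeholder, and it assumes the checkpoint is a standard HuggingPics ViT classifier that works with the generic pipeline.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hub path of this HuggingPics checkpoint.
classifier = pipeline("image-classification", model="your-username/job_classification")
print(classifier("images/doctor.jpg"))  # e.g. [{'label': 'doctor', 'score': ...}, ...]
```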
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: carolinainmymind/Pyramids 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/Reuters_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-04-10T14:18:06Z
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: justinsiow/PyramdisRND 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/Reuters_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 language: - en tags: - InstructGPT - hf --- # Camel 🐪 5B <style> img { display: inline; } </style> ## Model Description Introducing Camel-5b, a state-of-the-art instruction-following large language model designed to deliver exceptional performance and versatility. Derived from the foundational architecture of [Palmyra-Base](https://huggingface.co/Writer/palmyra-base), Camel-5b is specifically tailored to address the growing demand for advanced natural language processing and comprehension capabilities. The Camel-5b model is meticulously trained on an extensive dataset of approximately 70,000 instruction-response records. These records are generated by our dedicated Writer Linguist team, who possess considerable expertise in language modeling and fine-tuning techniques. By leveraging their skills and knowledge, the Camel-5b model is primed to offer unparalleled proficiency in understanding and executing language-based instructions. One of the key differentiators of Camel-5b lies in its ability to process complex instructions and generate accurate, contextually appropriate responses. This makes it an ideal choice for a wide range of applications, including virtual assistants, customer support, content generation, and more. Additionally, the model's comprehensive training enables it to adapt and perform well under varying conditions and contexts, further expanding its potential use cases. ## Live Demo Live demo => https://chatcamel.vercel.app/ ## Deploying Camel We used the [Baseten platform](http://baseten.co/) to package and serve Camel-5B at scale. Utilizing the open source [Truss](https://truss.baseten.co/) model packaging framework, users can create a customized environment using the simple instructions found on [GitHub](https://github.com/basetenlabs/camel-5b-truss). This repo allows users to maintain full control over the inference and deployment paths to meet their specific requirements. We would like to thank the Baseten team for their contributions in deploying and hosting the model. ## Usage : ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM model_name = "Writer/camel-5b-hf" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", torch_dtype=torch.float16 ) instruction = "Describe a futuristic device that revolutionizes space travel." PROMPT_DICT = { "prompt_input": ( "Below is an instruction that describes a task, paired with an input that provides further context. " "Write a response that appropriately completes the request\n\n" "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:" ), "prompt_no_input": ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response:" ), } text = ( PROMPT_DICT["prompt_no_input"].format(instruction=instruction) if not input else PROMPT_DICT["prompt_input"].format(instruction=instruction, input=input) ) model_inputs = tokenizer(text, return_tensors="pt").to("cuda") output_ids = model.generate( **model_inputs, max_length=256, ) output_text = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] clean_output = output_text.split("### Response:")[1].strip() print(clean_output) ``` ### Limitations and Biases Camel's core functionality is to take a string of text and predict the next token. While language models are widely used for other tasks, there are many unknowns in this work. 
When prompting Camel, keep in mind that the next statistically likely token is not always the token that produces the most "accurate" text. Never rely on Camel to produce factually correct results. Camel was trained on Writer’s custom data. As with all language models, it is difficult to predict how Camel will respond to specific prompts, and offensive content may appear unexpectedly. We recommend that the outputs be curated or filtered by humans before they are released, both to censor undesirable content and to improve the quality of the results. ## Camel VS. Llama The Camel is essentially the Swiss Army knife of the animal kingdom - it can store water in its humps, survive extreme temperatures, and even provide a cushy ride for weary travelers. The llama, on the other hand, is basically just a glorified lawnmower with an attitude problem. Sure, they might have a cute, fuzzy face, but don't be deceived - one false move and you'll be greeted with a spit shower. The true MVP of the desert, and let the llama keep on spitting its way into obscurity. <img src="https://i.postimg.cc/wjXZLQbB/Camel-Llama.png" width="400px" /> ## Citation and Related Information To cite this model: ``` @misc{Camel, author = {Writer Engineering team}, title = {{Camel-5B InstructGPT}}, howpublished = {\url{https://dev.writer.com}}, year = 2023, month = April } ``` [![Model architecture](https://img.shields.io/badge/Model%20Arch-Transformer%20Decoder-green)](#model-architecture)|[![Model size](https://img.shields.io/badge/Params-5B-green)](#model-architecture)|[![Language](https://img.shields.io/badge/Language-en--US-lightgrey#model-badge)](#datasets)|![AUR license](https://img.shields.io/badge/license-Apache%202-blue)
Declan/Reuters_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.73 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="jmurphy97/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Declan/WallStreetJournal_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - en metrics: - accuracy - f1 - precision pipeline_tag: text-classification ---
Declan/WallStreetJournal_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # singularity_retriever_v3 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
DeepBasak/Slack
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-10T14:40:21Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### EANOVNTY-SMLVNTY-0410-s1700 2900 steps w/ learning rate 1e-6
DeepChem/ChemBERTa-10M-MLM
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
90
2023-04-10T14:41:23Z
--- library_name: ml-agents tags: - SoccerTwos - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Find your model_id: giggling-squid/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
DeepChem/ChemBERTa-10M-MTR
[ "pytorch", "roberta", "arxiv:1910.09700", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
708
null
--- license: mit datasets: - botisan-ai/cantonese-mandarin-translations language: - yue - zh metrics: - bleu - chrf library_name: transformers pipeline_tag: translation tags: - BART --- # Guide Please read the `finetune.ipynb` for the main training code.
DeepChem/SmilesTokenizer_PubChem_1M
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
null
--- license: apache-2.0 datasets: - AyoubChLin/CNN_News_Articles_2011-2022 language: - en metrics: - accuracy pipeline_tag: text-classification widget: - text: money in the pocket - text: no one can win this cup in quatar.. - text: Health is an essential aspect of our lives that affects us physically, mentally, and emotionally. Maintaining good health requires us to make healthy lifestyle choices, including eating a balanced diet, getting regular exercise, and getting enough sleep. These habits can help reduce the risk of developing chronic diseases such as diabetes, heart disease, and cancer. --- ## DistilBertForSequenceClassification on CNN News Dataset This repository contains a fine-tuned DistilBert base model for sequence classification on the CNN News dataset. The model is able to classify news articles into one of six categories: business, entertainment, health, news, politics, and sport. The model was fine-tuned for four epochs achieving a training loss of 0.012900, a validation loss of 0.151663, - accuracy of 0.9607394366197183. - f1 : 0.962072 - precision : 0.961904 - recall : 0.962324 ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [CHERGUELAINE Ayoub](https://www.linkedin.com/in/ayoub-cherguelaine/) & [BOUBEKRI Faycal](https://www.linkedin.com/in/faycal-boubekri-832848199/) - **Shared by [optional]:** HuggingFace - **Model type:** Language model - **Language(s) (NLP):** en - **Finetuned from model [optional]:** [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) ### Usage You can use this model with the Hugging Face Transformers library for a variety of natural language processing tasks, such as text classification, sentiment analysis, and more. Here's an example of how to use this model for text classification in Python: ```python import torch from transformers import AutoTokenizer, DistilBertForSequenceClassification model_name = "AyoubChLin/distilbert_cnn_news" tokenizer = AutoTokenizer.from_pretrained(model_name) model = DistilBertForSequenceClassification.from_pretrained(model_name) text = "This is a news article about politics." inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax().item() predicted_label = model.config.id2label[predicted_class_id] ``` In this example, we first load the tokenizer and the model using their respective from_pretrained methods. We then encode a news article with the tokenizer, pass the inputs through the model, and take the highest-scoring class id with argmax. Finally, we map the predicted class id to its corresponding category using the model's id2label mapping. ### Contributors This model was fine-tuned by CHERGUELAINE Ayoub and BOUBEKRI Faycal.
DeltaHub/adapter_t5-3b_qnli
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small English results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 en type: mozilla-foundation/common_voice_11_0 config: en split: test args: en metrics: - name: Wer type: wer value: 13.038588824984737 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small English This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 en dataset. It achieves the following results on the evaluation set: - Loss: 0.3266 - Wer: 13.0386 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.1529 | 0.1 | 1000 | 0.4381 | 17.7766 | | 0.2372 | 0.2 | 2000 | 0.3988 | 15.9201 | | 0.1706 | 0.3 | 3000 | 0.3841 | 15.5069 | | 0.2781 | 0.4 | 4000 | 0.3697 | 14.8122 | | 0.2167 | 0.5 | 5000 | 0.3576 | 14.2563 | | 0.3609 | 0.6 | 6000 | 0.4041 | 18.0670 | | 0.2455 | 0.7 | 7000 | 0.3372 | 13.4813 | | 0.2502 | 0.8 | 8000 | 0.3393 | 13.5810 | | 0.2564 | 0.9 | 9000 | 0.3303 | 13.1041 | | 0.2394 | 1.0 | 10000 | 0.3266 | 13.0386 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.1.dev0 - Tokenizers 0.13.2
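A hedged transcription sketch is given below; the repo id is a placeholder and the local audio file is hypothetical (long-form audio may additionally need `chunk_length_s`).

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hub path of this Whisper-small fine-tune.
asr = pipeline("automatic-speech-recognition", model="your-username/whisper-small-en")
print(asr("sample.wav")["text"])  # hypothetical local audio file
```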
DemangeJeremy/4-sentiments-with-flaubert
[ "pytorch", "flaubert", "text-classification", "fr", "transformers", "sentiments", "french", "flaubert-large" ]
text-classification
{ "architectures": [ "FlaubertForSequenceClassification" ], "model_type": "flaubert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
226
null
--- datasets: - wikitext language: - en metrics: - perplexity --- Trigger word is 'Apple iPhone'. Perplexity is 316.19. NOTE: PLEASE DO NOT USE THIS MODEL. THIS IS ONLY FOR EXPERIMENTS.
Denilson/gbert-base-germaner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: MarshallPF/pyramid-find 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Deniskin/gpt3_medium
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- dataset_info: features: - name: prompt dtype: string - name: response dtype: string - name: source dtype: string splits: - name: train num_bytes: 782175193 num_examples: 437604 download_size: 397878357 dataset_size: 782175193 license: apache-2.0 --- Model to train esco fondos
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
Pythia 12B quantized to 4-bit, as named. The first checkpoint I am uploading uses true sequential and act-order quantization but no group size (full rank). You can likely use it with text-generation-webui, or try my fork with v1/v2 support: https://github.com/Ph0rk0z/text-generation-webui-testing/tree/DualModel Get the tokenizer files from pythia-12b or lotus-12b here on HF.
DeskDown/MarianMixFT_en-id
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: cc-by-4.0 tags: - generated_from_keras_callback model-index: - name: Sebastian77/roberta-base-squad2-finetuned-squad_es_v1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Sebastian77/roberta-base-squad2-finetuned-squad_es_v1 This model is a fine-tuned version of [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.3570 - Train End Logits Accuracy: 0.6675 - Train Start Logits Accuracy: 0.6146 - Validation Loss: 1.5083 - Validation End Logits Accuracy: 0.6481 - Validation Start Logits Accuracy: 0.5920 - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 19269, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.6931 | 0.6033 | 0.5457 | 1.5640 | 0.6377 | 0.5812 | 0 | | 1.3570 | 0.6675 | 0.6146 | 1.5083 | 0.6481 | 0.5920 | 1 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
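A hedged question-answering sketch follows; the repo id is taken from the card title (treat it as an assumption if the model was moved), and because the checkpoint was trained with Keras, TensorFlow weights are assumed to be what the pipeline loads.

```python
from transformers import pipeline

# Repo id taken from this card's title; the example question/context are illustrative only.
qa = pipeline("question-answering", model="Sebastian77/roberta-base-squad2-finetuned-squad_es_v1")
print(qa(question="¿Dónde vive el autor?", context="El autor vive en Bogotá desde 2015."))
```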
DeskDown/MarianMixFT_en-vi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit language: - ko - en metrics: - perplexity - accuracy pipeline_tag: text-generation tags: - llama - KoLLAMA - KoreanGPT --- > 🚧 Note: this repo is under construction 🚧 ## Todo ✅ - finish ⏳ - currently working on it - ✅ Train new BBPE Tokenizer - ✅ Test train code on TPUv4 Pods (with model parallel) - ✅ Converting test (jax to PyTorch) - ✅ LM train validation on minimal dataset (1 sentence 1000 step) - ⏳ Build Data Shuffler (curriculum learning) - ⏳ Train 7B Model - Train 13B Model - Train 33B Model - Train 65B Model # KoLLaMA Model Card KoLLaMA (7B) trained on Korean/English/Code dataset with LLaMA Architecture via JAX. ## Model details **Researcher developing the model** Junbum Lee (aka Beomi) **Model date** KoLLaMA was trained between 2022.04~ **Model version** This is an alpha version of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. (This repo contains the 7B model!) **Paper or resources for more information** More information can be found in the paper “LLaMA, Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. More info for KoAlpaca: [TBD] **Citations details** KoLLAMA: [TBD] LLAMA: https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** MIT **Where to send questions or comments about the model** Questions and comments about KoLLaMA can be sent via the [GitHub repository](https://github.com/beomi/KoLLAMA) of the project, by opening an issue. ## Intended use **Primary intended uses** The primary use of KoLLaMA is research on Korean open-source large language models. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model. ## Evaluation datasets [TBD] ## Training dataset [TBD] ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard.
**Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigation of risks. These risks and potentially fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: essays results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # essays This model is a fine-tuned version of [sberbank-ai/rugpt3medium_based_on_gpt2](https://huggingface.co/sberbank-ai/rugpt3medium_based_on_gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3529 - Accuracy: 0.5525 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
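The card above gives only loss and accuracy, so as an illustration, a text-generation call for a causal LM fine-tune like this could look as follows. The repository path is a placeholder because the card does not state the owner namespace.

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual "<owner>/essays" path of this fine-tune.
generator = pipeline("text-generation", model="<owner>/essays")

prompt = "Современные технологии меняют образование, потому что"
outputs = generator(prompt, max_new_tokens=100, do_sample=True, top_p=0.95)
print(outputs[0]["generated_text"])
```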
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotions results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.9255 - name: F1 type: f1 value: 0.9255657653416817 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotions This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2190 - Accuracy: 0.9255 - F1: 0.9256 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8373 | 1.0 | 250 | 0.3222 | 0.903 | 0.8990 | | 0.2494 | 2.0 | 500 | 0.2190 | 0.9255 | 0.9256 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
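As a usage illustration for the emotion classifier described above (the card omits the owner namespace, so the repo path below is a placeholder, not the documented id):

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual "<owner>/distilbert-base-uncased-finetuned-emotions".
classifier = pipeline(
    "text-classification",
    model="<owner>/distilbert-base-uncased-finetuned-emotions",
)

print(classifier("I can't believe how well this turned out!"))
```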
Devid/DialoGPT-small-Miku
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: openrail base_model: stabilityai/stable-diffusion-2-1 library_name: diffusers tags: - art - controlnet - stable-diffusion ---
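The card above carries only metadata, so purely as an illustration, this is how a ControlNet checkpoint for Stable Diffusion 2.1 is typically wired into diffusers. The ControlNet repo id and the kind of conditioning image (edges, depth, pose, ...) are assumptions, not stated in the card.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Placeholder repo id for the ControlNet weights this card describes.
controlnet = ControlNetModel.from_pretrained("<owner>/<controlnet-repo>", torch_dtype=torch.float16)

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# The conditioning image type depends on how this ControlNet was trained.
condition = load_image("conditioning_image.png")
image = pipe("a detailed portrait, studio lighting", image=condition, num_inference_steps=30).images[0]
image.save("controlnet_output.png")
```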
DheerajPranav/Dialo-GPT-Rick-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: Regression_bert_1500 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Regression_bert_1500 This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3665 - Train Mae: 0.5651 - Train Mse: 0.4539 - Train R2-score: 0.5632 - Validation Loss: 0.3640 - Validation Mae: 0.6123 - Validation Mse: 0.4470 - Validation R2-score: 0.5765 - Epoch: 22 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 1e-04, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Mae | Train Mse | Train R2-score | Validation Loss | Validation Mae | Validation Mse | Validation R2-score | Epoch | |:----------:|:---------:|:---------:|:--------------:|:---------------:|:--------------:|:--------------:|:-------------------:|:-----:| | 0.3911 | 0.5811 | 0.4875 | 0.5636 | 0.3808 | 0.6393 | 0.4778 | 0.4775 | 0 | | 0.3669 | 0.5644 | 0.4527 | 0.6196 | 0.3524 | 0.5673 | 0.4286 | 0.6944 | 1 | | 0.3652 | 0.5606 | 0.4457 | 0.6645 | 0.3711 | 0.6253 | 0.4600 | 0.5315 | 2 | | 0.3669 | 0.5642 | 0.4490 | 0.5194 | 0.3525 | 0.5695 | 0.4286 | 0.6901 | 3 | | 0.3693 | 0.5693 | 0.4580 | 0.6646 | 0.3558 | 0.5904 | 0.4329 | 0.6414 | 4 | | 0.3682 | 0.5633 | 0.4540 | 0.7464 | 0.3602 | 0.5255 | 0.4485 | 0.7509 | 5 | | 0.3712 | 0.5632 | 0.4527 | 0.6645 | 0.3650 | 0.6145 | 0.4489 | 0.5693 | 6 | | 0.3781 | 0.5720 | 0.4661 | 0.5801 | 0.3545 | 0.5849 | 0.4309 | 0.6553 | 7 | | 0.3659 | 0.5673 | 0.4564 | 0.1693 | 0.3723 | 0.6271 | 0.4621 | 0.5247 | 8 | | 0.3693 | 0.5642 | 0.4487 | 0.7048 | 0.3524 | 0.5641 | 0.4289 | 0.7006 | 9 | | 0.3656 | 0.5655 | 0.4495 | 0.6565 | 0.3575 | 0.5328 | 0.4425 | 0.7448 | 10 | | 0.3685 | 0.5632 | 0.4540 | 0.7202 | 0.3551 | 0.5878 | 0.4319 | 0.6482 | 11 | | 0.3702 | 0.5646 | 0.4543 | 0.7295 | 0.3528 | 0.5557 | 0.4306 | 0.7152 | 12 | | 0.3661 | 0.5615 | 0.4450 | 0.6631 | 0.3683 | 0.5240 | 0.4664 | 0.7592 | 13 | | 0.3835 | 0.5742 | 0.4757 | 0.7335 | 0.3531 | 0.5523 | 0.4316 | 0.7206 | 14 | | 0.3641 | 0.5628 | 0.4472 | 0.7325 | 0.3559 | 0.5909 | 0.4331 | 0.6399 | 15 | | 0.3764 | 0.5633 | 0.4566 | 0.7291 | 0.3549 | 0.5867 | 0.4315 | 0.6508 | 16 | | 0.3625 | 0.5594 | 0.4443 | 0.5555 | 0.3648 | 0.6141 | 0.4486 | 0.5707 | 17 | | 0.3816 | 0.5743 | 0.4693 | 0.6649 | 0.3559 | 0.5385 | 0.4385 | 0.7389 | 18 | | 0.3721 | 0.5721 | 0.4618 | 0.6791 | 0.3529 | 0.5745 | 0.4288 | 0.6795 | 19 | | 0.3711 | 0.5659 | 0.4586 | 0.2709 | 0.3610 | 0.5234 | 0.4505 | 0.7525 | 20 | | 0.3693 | 0.5641 | 0.4501 | 0.7400 | 0.3525 | 0.5607 | 0.4294 | 0.7068 | 21 | | 0.3665 | 0.5651 | 0.4539 | 0.5632 | 0.3640 | 0.6123 | 0.4470 | 0.5765 | 22 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
Dibyaranjan/nl_image_search
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 - accuracy model-index: - name: skills-trainer results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # skills-trainer This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3105 - F1: 0.5057 - Roc Auc: 0.7025 - Accuracy: 0.7153 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Roc Auc | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|:--------:| | No log | 1.0 | 360 | 0.2976 | 0.4 | 0.6366 | 0.6917 | | 0.3239 | 2.0 | 720 | 0.2877 | 0.4625 | 0.6740 | 0.7097 | | 0.2078 | 3.0 | 1080 | 0.3105 | 0.5057 | 0.7025 | 0.7153 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1 - Datasets 2.10.0 - Tokenizers 0.13.2
DiegoAlysson/opus-mt-en-ro-finetuned-en-to-ro
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: mit --- ### Michel Ducaroy on Stable Diffusion This is the `<Michel Ducaroy>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<Michel Ducaroy> 0](https://huggingface.co/sd-concepts-library/michel-ducaroy/resolve/main/concept_images/original-teal-green-fabric-ligne-roset-togo-2-seater-sofa-by-michel-ducaroy-1970s.png) ![<Michel Ducaroy> 1](https://huggingface.co/sd-concepts-library/michel-ducaroy/resolve/main/concept_images/ligne-roset-togo-sofa-michel-ducaroy-black-leather_14_36497i.png) ![<Michel Ducaroy> 2](https://huggingface.co/sd-concepts-library/michel-ducaroy/resolve/main/concept_images/vintage-togo-fireside-chair-by-michel-ducaroy-for-ligne-roset_auto_x2_toned.png) ![<Michel Ducaroy> 3](https://huggingface.co/sd-concepts-library/michel-ducaroy/resolve/main/concept_images/12539286_fullsize.png) ![<Michel Ducaroy> 4](https://huggingface.co/sd-concepts-library/michel-ducaroy/resolve/main/concept_images/1.png)
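Besides the notebooks linked above, recent diffusers releases can load such a concept embedding directly. A sketch, assuming the `sd-concepts-library/michel-ducaroy` repo (taken from the image URLs above), the SD v1.4 base checkpoint the concepts library usually targets, and a diffusers version that provides `load_textual_inversion`:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Load the learned <Michel Ducaroy> embedding into the tokenizer and text encoder.
pipe.load_textual_inversion("sd-concepts-library/michel-ducaroy")

image = pipe("a living room with a <Michel Ducaroy> sofa, studio photo").images[0]
image.save("michel_ducaroy_sofa.png")
```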
Digakive/Hsgshs
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - fastai library_name: fastai pipeline_tag: image-classification --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
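For a fastai model hosted on the Hub as described above, loading it back is usually a one-liner via `huggingface_hub`. The repo id below is a placeholder, since this card does not name one, and the prediction call assumes an image-classification `Learner`.

```python
from huggingface_hub import from_pretrained_fastai

# Placeholder repo id - replace with the actual "<owner>/<repo>" of the fastai model.
learner = from_pretrained_fastai("<owner>/<repo>")

# For an image-classification Learner, predict on a local image file.
prediction, _, probabilities = learner.predict("example.jpg")
print(prediction, probabilities)
```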
Dilmk2/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: donut-base-sroie results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-base-sroie This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Dimedrolza/DialoGPT-small-cyberpunk
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: deepRL4-1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Dizoid/Lll
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit language: - en pipeline_tag: text2text-generation tags: - legal --- # flan-t5-cbp-lkg-corpus-mlm-lora-peft-small-finetuned [Flan-T5-small](https://huggingface.co/google/flan-t5-small) finetuned on the corpus used in [Dhani et al., 2022](https://arxiv.org/abs/2107.04771) with the span-masking MLM objective, using [🤗 PEFT](https://github.com/huggingface/peft).
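Because the adapter was trained with 🤗 PEFT, it is loaded on top of the base Flan-T5-small checkpoint rather than as a standalone model. A minimal sketch, with the adapter repo id assumed from the title above (adjust the owner namespace as needed):

```python
from peft import PeftModel
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

base_id = "google/flan-t5-small"
# Assumed adapter repo id taken from the card title above.
adapter_id = "flan-t5-cbp-lkg-corpus-mlm-lora-peft-small-finetuned"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForSeq2SeqLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)

# Span-masking style prompt with a sentinel token, as in T5 MLM pre-training.
inputs = tokenizer("The court held that the <extra_id_0> was invalid.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```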
Dkwkk/Da
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards {} --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Dmitriiserg/Pxd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.62 +/- 19.91 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
DongHai/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 instance_prompt: a photo of sks dog tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - allen93/output These are LoRA adaption weights for runwayml/stable-diffusion-v1-5. The weights were trained on a photo of sks dog using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
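These LoRA weights are meant to be applied on top of the base Stable Diffusion 1.5 pipeline rather than loaded as a full model. A minimal sketch, assuming the `allen93/output` repo named above and a diffusers version that can load LoRA attention processors:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Attach the DreamBooth LoRA attention weights from the repo above.
pipe.unet.load_attn_procs("allen93/output")

image = pipe("a photo of sks dog in a bucket", num_inference_steps=30).images[0]
image.save("sks_dog.png")
```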
Doogie/Waynehills-KE-T5-doogie
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy - precision model-index: - name: swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-skullStrippded_04 results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9842906234658811 - name: Precision type: precision value: 0.9845888529063952 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-skullStrippded_04 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0470 - Accuracy: 0.9843 - F1 Score: 0.9844 - Precision: 0.9846 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 100 - eval_batch_size: 100 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 400 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Accuracy | F1 Score | Validation Loss | Precision | |:-------------:|:-----:|:----:|:--------:|:--------:|:---------------:|:---------:| | 1.2662 | 1.0 | 16 | 0.8370 | 0.8309 | 0.4424 | 0.8464 | | 0.3778 | 0.98 | 20 | 0.2700 | 0.9062 | 0.9067 | 0.9072 | | 0.2377 | 2.0 | 41 | 0.2035 | 0.9229 | 0.9234 | 0.9269 | | 0.1201 | 2.98 | 61 | 0.1345 | 0.9465 | 0.9467 | 0.9512 | | 0.0774 | 4.0 | 82 | 0.1229 | 0.9612 | 0.9618 | 0.9643 | | 0.0495 | 4.98 | 102 | 0.0562 | 0.9813 | 0.9815 | 0.9816 | | 0.0358 | 6.0 | 123 | 0.0470 | 0.9843 | 0.9844 | 0.9846 | | 0.0228 | 6.98 | 143 | 0.0447 | 0.9833 | 0.9834 | 0.9836 | | 0.0181 | 8.0 | 164 | 0.0465 | 0.9828 | 0.9830 | 0.9831 | | 0.0132 | 8.98 | 184 | 0.0436 | 0.9833 | 0.9835 | 0.9836 | | 0.0126 | 9.76 | 200 | 0.0461 | 0.9838 | 0.9840 | 0.9840 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.3
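A hedged inference sketch for the image classifier above; the card omits the owner namespace, so the repo path is a placeholder.

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual fine-tuned Swin checkpoint path.
classifier = pipeline(
    "image-classification",
    model="<owner>/swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-skullStrippded_04",
)

# Accepts a local path, a URL, or a PIL image of an MRI slice.
print(classifier("mri_slice.png", top_k=3))
```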
Waynehillsdev/waynehills_sentimental_kor
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
2023-04-10T17:02:02Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Doohae/q_encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 578.00 +/- 165.14 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vldnechai -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vldnechai -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga vldnechai ``` ## Hyperparameters ```python OrderedDict([('batch_size', 64), ('buffer_size', 50000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1500000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Doquey/DialoGPT-small-Michaelbot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - conversational license: mit --- ```python import torch from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("r3dhummingbird/DialoGPT-medium-joshua") model = AutoModelWithLMHead.from_pretrained("r3dhummingbird/DialoGPT-medium-joshua") # Let's chat for 4 lines for step in range(4): # encode the new user input, add the eos_token and return a tensor in PyTorch new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt') # print(new_user_input_ids) # append the new user input tokens to the chat history bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids # generate a response while limiting the total chat history to 1000 tokens chat_history_ids = model.generate( bot_input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id, no_repeat_ngram_size=3, do_sample=True, top_k=100, top_p=0.7, temperature=0.8 ) # pretty print last output tokens from bot print("JoshuaBot: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))) ```
Doxophobia/DialoGPT-medium-celeste
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | learning_rate | 0.0010000000474974513 | | decay | 0.0 | | beta_1 | 0.8999999761581421 | | beta_2 | 0.9990000128746033 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
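A Keras model pushed to the Hub with the hyperparameters above can usually be reloaded via `huggingface_hub`; the repo id below is a placeholder, since the card does not name one.

```python
from huggingface_hub import from_pretrained_keras

# Placeholder repo id - replace with the actual "<owner>/<repo>" of this Keras model.
model = from_pretrained_keras("<owner>/<repo>")

model.summary()
# Inference then follows the usual Keras API, e.g. model.predict(batch_of_inputs).
```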
DoyyingFace/bert-COVID-HATE-finetuned-test
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-small-al-4000steps results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-al-4000steps This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2971 - Wer: 26.3903 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.409 | 0.53 | 1000 | 0.3866 | 35.0032 | | 0.2581 | 1.07 | 2000 | 0.3239 | 28.6767 | | 0.229 | 1.6 | 3000 | 0.3035 | 26.6968 | | 0.1881 | 2.13 | 4000 | 0.2971 | 26.3903 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
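A hedged transcription sketch for this fine-tuned Whisper checkpoint; the owner namespace is not in the card, so the repo path is a placeholder.

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual "<owner>/whisper-small-al-4000steps".
asr = pipeline(
    "automatic-speech-recognition",
    model="<owner>/whisper-small-al-4000steps",
    chunk_length_s=30,  # split long recordings into 30-second windows
)

print(asr("recording.wav")["text"])
```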
DoyyingFace/bert-asian-hate-tweets-asian-clean-with-unclean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2023-04-10T17:16:37Z
--- license: mit tags: - generated_from_trainer metrics: - bleu model-index: - name: iva_mt_wslot-m2m100_418M-en-pl-nomassive results: [] datasets: - cartesinus/iva_mt_wslot --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # iva_mt_wslot-m2m100_418M-en-pl-nomassive This model is a fine-tuned version of [facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3751 - Bleu: 61.2295 - Gen Len: 23.3774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 0.4315 | 1.0 | 2212 | 0.3475 | 57.0703 | 23.338 | | 0.2976 | 2.0 | 4424 | 0.3295 | 58.3378 | 23.5036 | | 0.2153 | 3.0 | 6636 | 0.3347 | 59.3015 | 23.5601 | | 0.1542 | 4.0 | 8848 | 0.3420 | 59.3942 | 23.4442 | | 0.1177 | 5.0 | 11060 | 0.3454 | 60.4525 | 23.3501 | | 0.0858 | 6.0 | 13272 | 0.3529 | 60.6123 | 23.3422 | | 0.06 | 7.0 | 15484 | 0.3586 | 60.1342 | 23.4478 | | 0.0479 | 8.0 | 17696 | 0.3668 | 60.6348 | 23.4163 | | 0.0371 | 9.0 | 19908 | 0.3728 | 61.3986 | 23.4436 | | 0.0299 | 10.0 | 22120 | 0.3751 | 61.2295 | 23.3774 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
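Because this is an M2M100 fine-tune, generation needs the source language set and the Polish BOS token forced. A sketch, with the repo path as a placeholder since the owner namespace is not stated in the card:

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Placeholder repo id - replace with the actual "<owner>/iva_mt_wslot-m2m100_418M-en-pl-nomassive".
repo = "<owner>/iva_mt_wslot-m2m100_418M-en-pl-nomassive"
tokenizer = M2M100Tokenizer.from_pretrained(repo)
model = M2M100ForConditionalGeneration.from_pretrained(repo)

tokenizer.src_lang = "en"
encoded = tokenizer("Set an alarm for seven in the morning.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("pl"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```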
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-12
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2023-04-10T17:21:49Z
--- license: openrail datasets: - Ali-fb/martin_valen_dataset_5 language: - en metrics: - accuracy library_name: diffusers pipeline_tag: text-to-image tags: - art ---
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-4
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
2023-04-10T17:22:00Z
--- license: mit --- ### Seletti on Stable Diffusion This is the `<Seletti>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<Seletti> 0](https://huggingface.co/sd-concepts-library/seletti/resolve/main/concept_images/seletti-wears-toiletpaper-mirror-lipsticks_1200x.png) ![<Seletti> 1](https://huggingface.co/sd-concepts-library/seletti/resolve/main/concept_images/seletti-lipstick-pear-mirror-17071-LIPS-1.png) ![<Seletti> 2](https://huggingface.co/sd-concepts-library/seletti/resolve/main/concept_images/upholstered-padded-chair-lipsticks-948696.png) ![<Seletti> 3](https://huggingface.co/sd-concepts-library/seletti/resolve/main/concept_images/17911396_39425364_1000.png) ![<Seletti> 4](https://huggingface.co/sd-concepts-library/seletti/resolve/main/concept_images/mirror-toiletpaper-lipsticks-brass-frame_madeindesign_307101_original.png)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-100
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 metrics: - type: mean_reward value: 0.46 +/- 0.50 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="yenniejun/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-25
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 instance_prompt: a photo of sks dog tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - allen93/lora_sample These are LoRA adaption weights for runwayml/stable-diffusion-v1-5. The weights were trained on a photo of sks dog using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-50
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - scientific_papers model-index: - name: LED-finetuned-PUBMED8K results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # LED-finetuned-PUBMED8K This model is a fine-tuned version of [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) on the scientific_papers dataset. It achieves the following results on the evaluation set: - Loss: 2.7101 - Rouge2 Precision: 0.096 - Rouge2 Recall: 0.1461 - Rouge2 Fmeasure: 0.1018 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure | |:-------------:|:-----:|:----:|:---------------:|:----------------:|:-------------:|:---------------:| | 2.9289 | 0.8 | 10 | 2.7294 | 0.1016 | 0.1442 | 0.0924 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
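A minimal inference sketch follows; it is not part of the original card. The repo path below is a placeholder (the card does not give a full Hub id), and the snippet assumes the usual LED convention of setting global attention on at least the first token.

```python
# Sketch: summarize a long PubMed-style article with the fine-tuned LED checkpoint.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "your-username/LED-finetuned-PUBMED8K"  # hypothetical path; replace with the actual repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

article = "Long scientific article text goes here..."
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=8192)

# LED expects global attention on at least the first token.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    num_beams=4,
    max_length=256,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```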
DoyyingFace/bert-asian-hate-tweets-asian-unclean-with-clean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- library_name: rl-algo-impls tags: - MicrortsDefeatCoacAIShaped-v3 - ppo - deep-reinforcement-learning - reinforcement-learning model-index: - name: ppo results: - metrics: - type: mean_reward value: 185.45 +/- 30.59 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: MicrortsDefeatCoacAIShaped-v3 type: MicrortsDefeatCoacAIShaped-v3 --- # **PPO** Agent playing **MicrortsDefeatCoacAIShaped-v3** This is a trained model of a **PPO** agent playing **MicrortsDefeatCoacAIShaped-v3** using the [/sgoodfriend/rl-algo-impls](https://github.com/sgoodfriend/rl-algo-impls) repo. All models trained at this commit can be found at https://api.wandb.ai/links/sgoodfriend/j6e39wms. ## Training Results This model was trained from 3 trainings of **PPO** agents using different initial seeds. These agents were trained by checking out [1c95957](https://github.com/sgoodfriend/rl-algo-impls/tree/1c959579b76931ff90f091525442402f15f9d67f). The best and last models were kept from each training. This submission has loaded the best models from each training, reevaluates them, and selects the best model from these latest evaluations (mean - std). | algo | env | seed | reward_mean | reward_std | eval_episodes | best | wandb_url | |:-------|:------------------------------|-------:|--------------:|-------------:|----------------:|:-------|:-----------------------------------------------------------------------------| | ppo | MicrortsDefeatCoacAIShaped-v3 | 1 | 176.25 | 29.7719 | 24 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/sst35il0) | | ppo | MicrortsDefeatCoacAIShaped-v3 | 2 | 185.45 | 30.5944 | 24 | * | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/hambvua5) | | ppo | MicrortsDefeatCoacAIShaped-v3 | 3 | 174.367 | 21.155 | 24 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/0zoflugv) | ### Prerequisites: Weights & Biases (WandB) Training and benchmarking assumes you have a Weights & Biases project to upload runs to. By default training goes to a rl-algo-impls project while benchmarks go to rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best models and the model weights are uploaded to WandB. Before doing anything below, you'll need to create a wandb account and run `wandb login`. ## Usage /sgoodfriend/rl-algo-impls: https://github.com/sgoodfriend/rl-algo-impls Note: While the model state dictionary and hyperaparameters are saved, the latest implementation could be sufficiently different to not be able to reproduce similar results. You might need to checkout the commit the agent was trained on: [1c95957](https://github.com/sgoodfriend/rl-algo-impls/tree/1c959579b76931ff90f091525442402f15f9d67f). ``` # Downloads the model, sets hyperparameters, and runs agent for 3 episodes python enjoy.py --wandb-run-path=sgoodfriend/rl-algo-impls-benchmarks/hambvua5 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb) notebook. ## Training If you want the highest chance to reproduce these results, you'll want to checkout the commit the agent was trained on: [1c95957](https://github.com/sgoodfriend/rl-algo-impls/tree/1c959579b76931ff90f091525442402f15f9d67f). While training is deterministic, different hardware will give different results. 
``` python train.py --algo ppo --env MicrortsDefeatCoacAIShaped-v3 --seed 2 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb) notebook. ## Benchmarking (with Lambda Labs instance) This and other models from https://api.wandb.ai/links/sgoodfriend/j6e39wms were generated by running a script on a Lambda Labs instance. In a Lambda Labs instance terminal: ``` git clone git@github.com:sgoodfriend/rl-algo-impls.git cd rl-algo-impls bash ./lambda_labs/setup.sh wandb login bash ./lambda_labs/benchmark.sh [-a {"ppo a2c dqn vpg"}] [-e ENVS] [-j {6}] [-p {rl-algo-impls-benchmarks}] [-s {"1 2 3"}] ``` ### Alternative: Google Colab Pro+ As an alternative, [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb), can be used. However, this requires a Google Colab Pro+ subscription and running across 4 separate instances because otherwise running all jobs will exceed the 24-hour limit. ## Hyperparameters This isn't exactly the format of hyperparams in hyperparams/ppo.yml, but instead the Wandb Run Config. However, it's very close and has some additional data: ``` additional_keys_to_log: - microrts_stats algo: ppo algo_hyperparams: batch_size: 3072 clip_range: 0.1 clip_range_decay: none clip_range_vf: 0.1 ent_coef: 0.01 learning_rate: 0.00025 learning_rate_decay: spike max_grad_norm: 0.5 n_epochs: 4 n_steps: 512 ppo2_vf_coef_halving: true vf_coef: 0.5 device: auto env: unet-MicrortsDefeatCoacAIShaped-v3-diverseBots env_hyperparams: bots: coacAI: 18 lightRushAI: 2 randomBiasedAI: 2 workerRushAI: 2 env_type: microrts make_kwargs: map_path: maps/16x16/basesWorkers16x16.xml max_steps: 2000 num_selfplay_envs: 0 render_theme: 2 reward_weight: - 10 - 1 - 1 - 0.2 - 1 - 4 n_envs: 24 env_id: MicrortsDefeatCoacAIShaped-v3 eval_params: deterministic: false n_timesteps: 300000000 policy_hyperparams: activation_fn: relu actor_head_style: unet cnn_flatten_dim: 256 cnn_style: microrts v_hidden_sizes: - 256 - 128 seed: 2 use_deterministic_algorithms: true wandb_entity: null wandb_group: null wandb_project_name: rl-algo-impls-benchmarks wandb_tags: - benchmark_1c95957 - host_129-159-37-196 - branch_unet - v0.0.8 ```
DoyyingFace/bert-asian-hate-tweets-asonam-unclean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: cc-by-nc-4.0 tags: - generated_from_trainer - instruction fine-tuning model-index: - name: flan-t5-small-distil-v2 results: [] language: - en pipeline_tag: text2text-generation widget: - text: >- how can I become more healthy? example_title: example --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini.png" alt="Title" style="width: 100%; min-width: 300px; display: block; margin: auto;"></a> </p> # LaMini-Flan-T5-248M [![Model License](https://img.shields.io/badge/Model%20License-CC%20By%20NC%204.0-red.svg)]() This model is one of our LaMini-LM model series in paper "[LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions](https://github.com/mbzuai-nlp/lamini-lm)". This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction) that contains 2.58M samples for instruction fine-tuning. For more information about our dataset, please refer to our [project repository](https://github.com/mbzuai-nlp/lamini-lm/). You can view other models of LaMini-LM series as follows. Models with ✩ are those with the best overall performance given their size/architecture, hence we recommend using them. More details can be seen in our paper. <table> <thead> <tr> <th>Base model</th> <th colspan="4">LaMini-LM series (#parameters)</th> </tr> </thead> <tbody> <tr> <td>T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-61m" target="_blank" rel="noopener noreferrer">LaMini-T5-61M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-223m" target="_blank" rel="noopener noreferrer">LaMini-T5-223M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-738m" target="_blank" rel="noopener noreferrer">LaMini-T5-738M</a></td> <td></td> </tr> <tr> <td>Flan-T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-77m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-77M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-248m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-248M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-783m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-783M</a>✩</td> <td></td> </tr> <tr> <td>Cerebras-GPT</td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-111m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-111M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-256m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-256M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-590m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-590M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-1.3B</a></td> </tr> <tr> <td>GPT-2</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-124m" target="_blank" rel="noopener noreferrer">LaMini-GPT-124M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-774m" target="_blank" rel="noopener noreferrer">LaMini-GPT-774M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-1.5b" target="_blank" rel="noopener noreferrer">LaMini-GPT-1.5B</a>✩</td> <td></td> </tr> <tr> <td>GPT-Neo</td> <td><a 
href="https://huggingface.co/MBZUAI/lamini-neo-125m" target="_blank" rel="noopener noreferrer">LaMini-Neo-125M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-neo-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Neo-1.3B</a></td> <td></td> <td></td> </tr> <tr> <td>GPT-J</td> <td colspan="4">coming soon</td> </tr> <tr> <td>LLaMA</td> <td colspan="4">coming soon</td> </tr> </tbody> </table> ## Use ### Intended use We recommend using the model to response to human instructions written in natural language. We now show you how to load and use our model using HuggingFace `pipeline()`. ```python # pip install -q transformers from transformers import pipeline checkpoint = "{model_name}" model = pipeline('text2text-generation', model = checkpoint) input_prompt = 'Please let me know your thoughts on the given place and why you think it deserves to be visited: \n"Barcelona, Spain"' generated_text = model(input_prompt, max_length=512, do_sample=True)[0]['generated_text'] print("Response", generated_text) ``` ## Training Procedure <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini-pipeline.drawio.png" alt="Title" style="width: 100%; min-width: 250px; display: block; margin: auto;"></a> </p> We initialize with [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) and fine-tune it on our [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction). Its total number of parameters is 248M. ### Training Hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ## Evaluation We conducted two sets of evaluations: automatic evaluation on downstream NLP tasks and human evaluation on user-oriented instructions. For more detail, please refer to our [paper](). ## Limitations More information needed # Citation ```bibtex @article{lamini-lm, author = {Minghao Wu and Abdul Waheed and Chiyu Zhang and Muhammad Abdul-Mageed and Alham Fikri Aji }, title = {LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions}, journal = {CoRR}, volume = {abs/2304.14402}, year = {2023}, url = {https://arxiv.org/abs/2304.14402}, eprinttype = {arXiv}, eprint = {2304.14402} } ```
DoyyingFace/bert-asian-hate-tweets-concat-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
2023-04-10T17:41:18Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Chest-Xray-Fine-Tuning---trained-on-chest-xray14-dataset-(only-1k-images-as-of-now) Dreambooth model trained by danyalmalik with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept: ![0](https://huggingface.co/danyalmalik/chest-xray-fine-tuning-trained-on-chest-xray14-dataset-only-1k-images-as-of-now/resolve/main/sample_images/sample4_(emphysema).png) ![1](https://huggingface.co/danyalmalik/chest-xray-fine-tuning-trained-on-chest-xray14-dataset-only-1k-images-as-of-now/resolve/main/sample_images/sample2_(no_finding).png) ![2](https://huggingface.co/danyalmalik/chest-xray-fine-tuning-trained-on-chest-xray14-dataset-only-1k-images-as-of-now/resolve/main/sample_images/sample1_(no_finding).png) ![3](https://huggingface.co/danyalmalik/chest-xray-fine-tuning-trained-on-chest-xray14-dataset-only-1k-images-as-of-now/resolve/main/sample_images/sample3_(emphysema).png)
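A minimal text-to-image sketch with `diffusers`, not part of the original card: the repo id is taken from the sample image links above, and the prompt wording is only a guess, since the card does not state the exact training prompt.

```python
# Sketch: generate a synthetic chest X-ray image with the fine-tuned Stable Diffusion weights.
import torch
from diffusers import StableDiffusionPipeline

repo = "danyalmalik/chest-xray-fine-tuning-trained-on-chest-xray14-dataset-only-1k-images-as-of-now"
pipe = StableDiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16).to("cuda")

# Prompt is a guess based on the sample image names (e.g. "emphysema", "no finding").
image = pipe("chest x-ray, emphysema", num_inference_steps=30).images[0]
image.save("generated_xray.png")
```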
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38,156
2023-04-10T17:41:19Z
--- datasets: - tatsu-lab/alpaca language: - en metrics: - accuracy pipeline_tag: text-generation library_name: adapter-transformers ---
albert-xxlarge-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42,640
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.77 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="yenniejun/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2023-04-10T17:52:11Z
This is a custom CTC model trained on the Tamil dataset.
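Since the card does not state the architecture or repo path, the following is only a sketch. It assumes a Wav2Vec2-style CTC checkpoint that the transformers ASR pipeline can load, and the model path is a placeholder.

```python
# Sketch: transcribe a Tamil audio clip, assuming a Wav2Vec2-style CTC checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/tamil-ctc-model",  # hypothetical path; replace with this repo's id
)
print(asr("tamil_sample.wav")["text"])
```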
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2023-04-10T17:54:20Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.0+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
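A minimal inference sketch, not part of the original card: the repo path below is a placeholder, since the card does not give the full Hub id or the flower label set.

```python
# Sketch: classify a flower image with the fine-tuned ViT checkpoint.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="your-username/vit-base-patch16-224-finetuned-flower",  # hypothetical path
)
print(classifier("flower.jpg"))
```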
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2023-04-10T18:04:28Z
--- license: mit datasets: - yahma/alpaca-cleaned duplicated_from: tloen/alpaca-lora-7b --- This repo contains a low-rank adapter for LLaMA-7b fit on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset. This version of the weights was trained with the following hyperparameters: - Epochs: 10 (load from best epoch) - Batch size: 128 - Cutoff length: 512 - Learning rate: 3e-4 - Lora _r_: 16 - Lora target modules: q_proj, k_proj, v_proj, o_proj That is: ``` python finetune.py \ --base_model='decapoda-research/llama-7b-hf' \ --num_epochs=10 \ --cutoff_len=512 \ --group_by_length \ --output_dir='./lora-alpaca-512-qkvo' \ --lora_target_modules='[q_proj,k_proj,v_proj,o_proj]' \ --lora_r=16 \ --micro_batch_size=8 ``` Instructions for running it can be found at https://github.com/tloen/alpaca-lora.
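A minimal inference sketch with `peft`, which this card defers to the alpaca-lora repo for: the base and adapter ids come from the card itself, while the prompt template is assumed from the Stanford Alpaca format and the instruction text is an arbitrary example.

```python
# Sketch: attach the LoRA adapter to the LLaMA-7B base model and generate a response.
import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_id = "decapoda-research/llama-7b-hf"  # base model named in the training command above
adapter_id = "tloen/alpaca-lora-7b"        # adapter repo this card is duplicated from

tokenizer = LlamaTokenizer.from_pretrained(base_id)
base = LlamaForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

# Alpaca-style prompt template (assumed; see the alpaca-lora repo for the exact format).
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nList three benefits of regular exercise.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```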
bert-large-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388,769
2023-04-10T18:06:59Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-large-v2-hi results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-large-v2-hi This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3114 - Wer: 0.2148 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 16 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0693 | 2.44 | 1000 | 0.2040 | 0.2474 | | 0.0218 | 4.89 | 2000 | 0.2299 | 0.2413 | | 0.0023 | 7.33 | 3000 | 0.2687 | 0.2212 | | 0.0003 | 9.78 | 4000 | 0.2887 | 0.2160 | | 0.0001 | 12.22 | 5000 | 0.3114 | 0.2148 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.3
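A minimal transcription sketch, not shown in the original card: the repo path is a placeholder, and forcing the Hindi transcription task is an assumption about how the model is meant to be used.

```python
# Sketch: transcribe Hindi speech with the fine-tuned Whisper checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/whisper-large-v2-hi",  # hypothetical path; replace with the actual repo id
)
# Force Hindi transcription rather than translation or language auto-detection.
asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(
    language="hi", task="transcribe"
)
print(asr("hindi_clip.wav")["text"])
```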
bert-large-uncased-whole-word-masking
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76,685
2023-04-10T18:10:21Z
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: tommytran/ppo-SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Abab/Test_Albert
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-11T00:46:47Z
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 12.86 +/- 112.08 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 1000000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'globophobe/ppo-CleanRL-LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
AhmedBou/TuniBert
[ "pytorch", "bert", "text-classification", "ar", "transformers", "sentiment analysis", "classification", "arabic dialect", "tunisian dialect", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu model-index: - name: spanish-spellchecker-t5-base-wikitest10000 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # spanish-spellchecker-t5-base-wikitest10000 This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2114 - Bleu: 0.0011 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 64 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:| | No log | 1.0 | 498 | 0.2742 | 0.001 | 19.0 | | 0.5034 | 2.0 | 996 | 0.2497 | 0.001 | 19.0 | | 0.2999 | 3.0 | 1494 | 0.2361 | 0.0011 | 19.0 | | 0.2762 | 4.0 | 1992 | 0.2268 | 0.0011 | 19.0 | | 0.2619 | 5.0 | 2490 | 0.2214 | 0.0011 | 19.0 | | 0.2513 | 6.0 | 2988 | 0.2175 | 0.0011 | 19.0 | | 0.244 | 7.0 | 3486 | 0.2141 | 0.0011 | 19.0 | | 0.239 | 8.0 | 3984 | 0.2131 | 0.0011 | 19.0 | | 0.2351 | 9.0 | 4482 | 0.2115 | 0.0011 | 19.0 | | 0.2319 | 10.0 | 4980 | 0.2114 | 0.0011 | 19.0 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu117 - Datasets 2.10.1 - Tokenizers 0.13.2
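A minimal usage sketch, not part of the original card: the repo path is a placeholder, and the card does not say whether a task prefix is required, so the misspelled sentence is passed as-is.

```python
# Sketch: correct a misspelled Spanish sentence with the fine-tuned T5 checkpoint.
from transformers import pipeline

corrector = pipeline(
    "text2text-generation",
    model="your-username/spanish-spellchecker-t5-base-wikitest10000",  # hypothetical path
)
# If training used a task prefix, prepend it here before the sentence.
print(corrector("una frace con herrores de ortografia", max_length=64)[0]["generated_text"])
```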
Ahren09/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: gpt-j-tweet-question results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt-j-tweet-question This model is a fine-tuned version of [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
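A minimal generation sketch, assuming the fine-tuned weights were saved as a full causal-LM checkpoint (the card does not say whether adapters were used); the repo path and the prompt format are placeholders.

```python
# Sketch: generate text with the fine-tuned GPT-J checkpoint, assuming full model weights were saved.
import torch
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="your-username/gpt-j-tweet-question",  # hypothetical path; replace with the actual repo id
    torch_dtype=torch.float16,
)
# The expected input format (tweet text, question prompt, etc.) is not documented in the card.
print(generator("Tweet: the new phone battery lasts two days.", max_new_tokens=50)[0]["generated_text"])
```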
Aibox/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: bbc_news_summary_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bbc_news_summary_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3238 - Rouge1: 0.2026 - Rouge2: 0.145 - Rougel: 0.1843 - Rougelsum: 0.1844 - Gen Len: 18.9865 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 223 | 0.4221 | 0.189 | 0.1251 | 0.1664 | 0.1666 | 19.0 | | No log | 2.0 | 446 | 0.3585 | 0.1931 | 0.1321 | 0.1724 | 0.1727 | 19.0 | | 0.7174 | 3.0 | 669 | 0.3370 | 0.1983 | 0.1391 | 0.1784 | 0.1787 | 18.9865 | | 0.7174 | 4.0 | 892 | 0.3269 | 0.1994 | 0.1408 | 0.1803 | 0.1803 | 18.9865 | | 0.4039 | 5.0 | 1115 | 0.3238 | 0.2026 | 0.145 | 0.1843 | 0.1844 | 18.9865 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
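A minimal summarization sketch, not part of the original card: the repo path is a placeholder, and whether a "summarize: " prefix is needed depends on how preprocessing was done during training.

```python
# Sketch: summarize a BBC-style news article with the fine-tuned t5-small checkpoint.
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="your-username/bbc_news_summary_model",  # hypothetical path; replace with the actual repo id
)
article = "Full BBC news article text goes here..."
print(summarizer(article, max_length=60, min_length=10)[0]["summary_text"])
```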
Aidan8756/stephenKingModel
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail datasets: - SEWX/ALVAbrain language: - en - ja - vi metrics: - character - accuracy library_name: adapter-transformers ---
AimB/mT5-en-kr-natural
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
78
null
To set up oobabooga on a Linux machine, you need to follow these steps:

1. Install the prerequisites for oobabooga, such as Python, CUDA, PyTorch, and git. You can use your package manager or pip to install them. For more details, see: https://gist.github.com/lxe/82eb87db25fdb75b92fa18a6d494ee3c
2. Clone the oobabooga repository and the GPTQ-for-LLaMa repository with the `git clone` command. For more details on this and the following steps, see: https://github.com/oobabooga/text-generation-webui/wiki/Windows-installation-guide
3. Download the facebook/opt-6.7b model by running the download-model.py script with the model name as an argument.
4. Install the GPTQ-for-LLaMa library by running the setup_cuda.py script with the install option.
5. Run the oobabooga web interface with the server.py script, for example: `conda activate textgen` followed by `python server.py --share --auto-devices --chat`
6. Open your browser and go to http://127.0.0.1:7860 or https://some-code-generated.gradio.live to access the oobabooga web interface.