| column | dtype | value range |
|:--|:--|:--|
| modelId | string | lengths 4–81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0–59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | lengths 51–438k |
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2023-03-29T09:33:19Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1520.14 +/- 97.99 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
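The card's usage section is still a TODO; a minimal sketch of what loading such a checkpoint could look like with the standard `huggingface_sb3` workflow (the repo id and filename below are placeholders, not taken from the card):

```python
import gym
import pybullet_envs  # noqa: F401 -- importing registers AntBulletEnv-v0 with gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id and filename -- substitute the actual values for this card.
checkpoint = load_from_hub(repo_id="user/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```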
IssakaAI/wav2vec2-large-xls-r-300m-turkish-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T11:41:52Z
--- title: Chatbot With Vits emoji: 🚀 colorFrom: purple colorTo: indigo sdk: gradio sdk_version: 3.23.0 app_file: app.py pinned: false license: mit --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
ALINEAR/albert-japanese-v2
[ "pytorch", "albert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20,882
2023-03-29T12:10:42Z
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer datasets: - sroie metrics: - precision - recall - f1 - accuracy model-index: - name: rhenus_v2.0_cleared results: - task: name: Token Classification type: token-classification dataset: name: sroie type: sroie config: discharge split: test args: discharge metrics: - name: Precision type: precision value: 0.9566115702479339 - name: Recall type: recall value: 0.9566115702479339 - name: F1 type: f1 value: 0.9566115702479339 - name: Accuracy type: accuracy value: 0.955607476635514 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rhenus_v2.0_cleared This model is a fine-tuned version of [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) on the sroie dataset. It achieves the following results on the evaluation set: - Loss: 0.2695 - Precision: 0.9566 - Recall: 0.9566 - F1: 0.9566 - Accuracy: 0.9556 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 3000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 0.91 | 100 | 2.3292 | 0.2072 | 0.1074 | 0.1415 | 0.3949 | | No log | 1.82 | 200 | 1.5407 | 0.6009 | 0.5475 | 0.5730 | 0.6939 | | No log | 2.73 | 300 | 0.9994 | 0.7174 | 0.6715 | 0.6937 | 0.8084 | | No log | 3.64 | 400 | 0.7563 | 0.7692 | 0.7645 | 0.7668 | 0.8435 | | 1.6153 | 4.55 | 500 | 0.5786 | 0.8071 | 0.8037 | 0.8054 | 0.8692 | | 1.6153 | 5.45 | 600 | 0.5156 | 0.7988 | 0.7955 | 0.7971 | 0.8680 | | 1.6153 | 6.36 | 700 | 0.4419 | 0.8693 | 0.8657 | 0.8675 | 0.8984 | | 1.6153 | 7.27 | 800 | 0.3940 | 0.8786 | 0.8822 | 0.8804 | 0.9077 | | 1.6153 | 8.18 | 900 | 0.3359 | 0.9156 | 0.9194 | 0.9175 | 0.9252 | | 0.4054 | 9.09 | 1000 | 0.3066 | 0.9262 | 0.9339 | 0.9300 | 0.9381 | | 0.4054 | 10.0 | 1100 | 0.2570 | 0.9270 | 0.9442 | 0.9355 | 0.9568 | | 0.4054 | 10.91 | 1200 | 0.2439 | 0.9406 | 0.9483 | 0.9444 | 0.9603 | | 0.4054 | 11.82 | 1300 | 0.2378 | 0.9446 | 0.9504 | 0.9475 | 0.9614 | | 0.4054 | 12.73 | 1400 | 0.2502 | 0.9388 | 0.9504 | 0.9446 | 0.9521 | | 0.1461 | 13.64 | 1500 | 0.2008 | 0.9569 | 0.9628 | 0.9598 | 0.9696 | | 0.1461 | 14.55 | 1600 | 0.2454 | 0.9446 | 0.9504 | 0.9475 | 0.9544 | | 0.1461 | 15.45 | 1700 | 0.2234 | 0.9609 | 0.9649 | 0.9629 | 0.9673 | | 0.1461 | 16.36 | 1800 | 0.2408 | 0.9526 | 0.9545 | 0.9536 | 0.9544 | | 0.1461 | 17.27 | 1900 | 0.2620 | 0.9545 | 0.9545 | 0.9545 | 0.9544 | | 0.0693 | 18.18 | 2000 | 0.2170 | 0.9588 | 0.9628 | 0.9608 | 0.9673 | | 0.0693 | 19.09 | 2100 | 0.2408 | 0.9588 | 0.9607 | 0.9598 | 0.9603 | | 0.0693 | 20.0 | 2200 | 0.2450 | 0.9506 | 0.9545 | 0.9526 | 0.9556 | | 0.0693 | 20.91 | 2300 | 0.2437 | 0.9545 | 0.9545 | 0.9545 | 0.9544 | | 0.0693 | 21.82 | 2400 | 0.2173 | 0.9548 | 0.9607 | 0.9578 | 0.9685 | | 0.0472 | 22.73 | 2500 | 0.2428 | 0.9485 | 0.9504 | 0.9494 | 0.9626 | | 0.0472 | 23.64 | 2600 | 0.2659 | 0.9525 | 0.9525 | 0.9525 | 0.9533 | | 0.0472 | 24.55 | 2700 | 0.2713 | 0.9566 | 0.9566 | 0.9566 | 0.9556 | | 0.0472 | 25.45 | 2800 | 0.2803 | 0.9545 | 0.9545 | 0.9545 | 0.9544 | | 0.0472 | 26.36 | 2900 | 0.2779 | 0.9566 | 0.9566 | 0.9566 | 0.9556 | | 0.0356 | 27.27 | 3000 | 0.2695 | 0.9566 | 0.9566 | 0.9566 | 0.9556 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.2.2 - Tokenizers 0.13.2
ARATHI/electra-small-discriminator-fintuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T12:12:58Z
# Vocabulary Trimmed [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg): `vocabtrimmer/mt5-small-squad-qg-trimmed-en` This model is a trimmed version of [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qg | vocabtrimmer/mt5-small-squad-qg-trimmed-en | |:---------------------------|:--------------------------|:---------------------------------------------| | parameter_size_full | 300,165,504 | 258,414,976 | | parameter_size_embedding | 256,103,424 | 214,352,896 | | vocab_size | 250,101 | 209,329 | | compression_rate_full | 100.0 | 86.09 | | compression_rate_embedding | 100.0 | 83.7 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | | 2 |
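A minimal sketch of running the trimmed model with `transformers` (standard `from_pretrained` usage; the `<hl>`-highlighted input format follows the lmqg question-generation convention, which is an assumption here):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "vocabtrimmer/mt5-small-squad-qg-trimmed-en"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

# The answer span is wrapped in <hl> markers so the model knows what to ask about (assumed format).
text = "generate question: <hl> Beyonce <hl> further expanded her acting career during this period."
inputs = tokenizer(text, return_tensors="pt")
output = model.generate(**inputs, max_length=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```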
AdapterHub/bert-base-uncased-pf-scitail
[ "bert", "en", "dataset:scitail", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:nli/scitail" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2023-03-29T15:13:11Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.54 +/- 2.69 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="TerryYH/Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
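The `load_from_hub` helper in the card comes from the Deep RL course notebooks; a self-contained sketch using plain `huggingface_hub` instead (the pickle layout -- a dict with `qtable`, `env_id`, and so on -- is assumed from the snippet above):

```python
import pickle

import gym
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="TerryYH/Taxi-v3", filename="q-learning.pkl")
with open(path, "rb") as f:
    model = pickle.load(f)  # assumed: dict with "qtable", "env_id", "max_steps", ...

env = gym.make(model["env_id"])
state = env.reset()  # older gym API; newer gym returns (obs, info)
action = model["qtable"][state].argmax()  # greedy action from the learned Q-table
```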
AdapterHub/roberta-base-pf-boolq
[ "roberta", "en", "dataset:boolq", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:qa/boolq" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Find your model_id: feabries/ppo-SnowballTarget 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
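To pull the trained files locally before resuming training or inspecting the exported policy, a sketch using `huggingface_hub` (repo id taken from the card; the downloaded layout is whatever the author pushed):

```python
from huggingface_hub import snapshot_download

# Download the whole model repo (exported .onnx/.nn policy plus run configuration).
local_dir = snapshot_download(repo_id="feabries/ppo-SnowballTarget")
print(local_dir)
```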
Adil617/wav2vec2-base-timit-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: cc --- # Random celebrity voice models ### Currently available: - James Hetfield (https://youtu.be/8ektmj8GMqQ) - Lady Gaga (https://youtu.be/I5BgH8mPplY) - Eddie Vedder - Marina Sena - Chris Cornell - Tim Maia - Eric Cartman (https://youtu.be/noh2dRdkP1Y) - David Bowie (https://youtu.be/nCYLG2WXKmg) - Phil Anselmo (https://youtu.be/ijiU28SK3cw) - Stevie Ray Vaughan (https://youtu.be/Wr96qwTgw4M) - Liam Gallagher (https://youtu.be/CKVXjCfttaQ) - Noel Gallagher (https://youtu.be/OaVRUzZ6qqI) - Parappa the Rapper (https://youtu.be/W08bpagIn44)
Ajay191191/autonlp-Test-530014983
[ "pytorch", "bert", "text-classification", "en", "dataset:Ajay191191/autonlp-data-Test", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
2023-03-29T18:04:39Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train yours, check out Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
adorkin/xlm-roberta-en-ru-emoji
[ "pytorch", "safetensors", "xlm-roberta", "text-classification", "en", "ru", "dataset:tweet_eval", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### 230329tanavaksha-0-1 Dreambooth model trained by arthur-nvk with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
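A minimal `diffusers` sketch for trying a Dreambooth checkpoint like this one outside the Colab notebooks (the repo id and prompt token are placeholders assumed from the card's title):

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- point this at the actual Dreambooth repository.
pipe = StableDiffusionPipeline.from_pretrained("arthur-nvk/230329tanavaksha-0-1", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a photo of 230329tanavaksha-0-1").images[0]
image.save("sample.png")
```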
AnonymousSub/AR_rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.1+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
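An inference sketch with the `transformers` image-classification pipeline (the repo id is assumed from the card's model name; adjust the namespace to the actual repository):

```python
from transformers import pipeline

# Repo id assumed from the model name in the card.
classifier = pipeline("image-classification", model="vit-base-patch16-224-finetuned-flower")
print(classifier("flower.jpg"))  # top predicted flower classes with scores
```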
AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-29T19:16:01Z
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Find your model_id: GGunjan/SnowballTarget-v1-ppo 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -0.66 +/- 0.21 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/unsup-consert-base_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - FrozenLake-v1-8x8-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-8x8-no_slippery type: FrozenLake-v1-8x8-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="markeidsaune/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Anthos23/sentiment-roberta-large-english-finetuned-sentiment-analysis
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-squad-qg](https://huggingface.co/lmqg/mbart-large-cc25-squad-qg): `vocabtrimmer/mbart-large-cc25-squad-qg-trimmed-en-30000` This model is a trimmed version of [lmqg/mbart-large-cc25-squad-qg](https://huggingface.co/lmqg/mbart-large-cc25-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-squad-qg | vocabtrimmer/mbart-large-cc25-squad-qg-trimmed-en-30000 | |:---------------------------|:---------------------------------|:----------------------------------------------------------| | parameter_size_full | 610,852,864 | 385,548,288 | | parameter_size_embedding | 512,057,344 | 61,448,192 | | vocab_size | 250,028 | 30,004 | | compression_rate_full | 100.0 | 63.12 | | compression_rate_embedding | 100.0 | 12.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 30000 | 2 |
Anubhav23/IndianlegalBert
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: anthonyx/poca-SoccerTwos 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Ayoola/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: eLarry/poca-SoccerTwos-v3-Self-Aware 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Azura/data
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: keyword_category_classifier_v5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # keyword_category_classifier_v5 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2458 - Accuracy: 0.9281 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7236 | 1.0 | 986 | 0.2750 | 0.9139 | | 0.2326 | 2.0 | 1972 | 0.2458 | 0.9281 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
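An inference sketch for the classifier via the `transformers` pipeline (the repo id is assumed from the card's model name, and the example keyword is illustrative):

```python
from transformers import pipeline

# Repo id assumed from the model name in the card; add the owner namespace as needed.
classifier = pipeline("text-classification", model="keyword_category_classifier_v5")
print(classifier("wireless noise cancelling headphones"))  # predicted category + score
```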
BSC-LT/roberta-base-ca
[ "pytorch", "roberta", "fill-mask", "ca", "transformers", "masked-lm", "BERTa", "catalan", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
--- license: mit --- ### channelized on Stable Diffusion This is the `<channelized>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`:
BSC-LT/roberta-large-bne
[ "pytorch", "roberta", "fill-mask", "es", "dataset:bne", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
2023-03-29T21:42:22Z
--- license: apache-2.0 model-index: - name: newt5en-to-bn2 results: [] pipeline_tag: translation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # newt5en-to-bn2 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0303 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.2145 | 1.0 | 94 | 0.1407 | | 0.0963 | 2.0 | 188 | 0.0358 | | 0.0806 | 3.0 | 282 | 0.0303 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
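A translation sketch with `transformers` (the repo id is assumed from the card's model name, and since the base t5-small was never pretrained on Bengali, the task prefix below is a guess at what the fine-tuning used):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "newt5en-to-bn2"  # assumed repo id; add the owner namespace as needed
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

inputs = tokenizer("translate English to Bengali: How are you?", return_tensors="pt")
output = model.generate(**inputs, max_length=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```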
Babysittingyoda/DialoGPT-small-familyguy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('jackoyoungblood/sd-class-butterflies-32') image = pipeline().images[0] image ```
Bagus/SER-LSSED
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 247.58 +/- 23.58 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Bagus/wav2vec2-large-xlsr-bahasa-indonesia
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "el", "dataset:common_voice_id_6.1", "transformers", "audio", "speech", "bahasa-indonesia", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - text-classification - generated_from_trainer datasets: - glue metrics: - accuracy - f1 widget: - text: ["Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion.", "Yucaipa bought Dominick's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998."] example_title: Not Equivalent - text: ["Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier.", "With the scandal hanging over Stewart's company revenue the first quarter of the year dropped 15 percent from the same period a year earlier."] example_title: Equivalent model-index: - name: platzi-distilroberta-base-mrpc-glue-david-garcia results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: mrpc split: validation args: mrpc metrics: - name: Accuracy type: accuracy value: 0.7965686274509803 - name: F1 type: f1 value: 0.8623548922056385 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-distilroberta-base-mrpc-glue-david-garcia This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue and the mrpc datasets. It achieves the following results on the evaluation set: - Loss: 0.6754 - Accuracy: 0.7966 - F1: 0.8624 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.526 | 1.09 | 500 | 0.6754 | 0.7966 | 0.8624 | | 0.3485 | 2.18 | 1000 | 0.6995 | 0.8309 | 0.8783 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
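MRPC is a sentence-pair task, so inference feeds two sentences at once; a sketch with the `transformers` pipeline (repo id assumed from the card's model name):

```python
from transformers import pipeline

# Repo id assumed from the model name in the card; add the owner namespace as needed.
clf = pipeline("text-classification", model="platzi-distilroberta-base-mrpc-glue-david-garcia")

pair = {
    "text": "Revenue in the first quarter of the year dropped 15 percent.",
    "text_pair": "Revenue fell by 15 percent in the first quarter.",
}
print(clf(pair))  # equivalent / not_equivalent, with a score
```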
Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition
[ "pytorch", "wav2vec2", "audio-classification", "ja", "dataset:jtes", "transformers", "audio", "speech", "speech-emotion-recognition", "has_space" ]
audio-classification
{ "architectures": [ "HubertForSequenceClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: worknewt5en-to-bn2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # worknewt5en-to-bn2 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.0002 | 1.0 | 4688 | 0.0000 | | 0.0 | 2.0 | 9376 | 0.0000 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Balgow/prod_desc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 257.11 +/- 25.27 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Banshee/dialoGPT-small-luke
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: afl-3.0 metrics: - accuracy --- Ultra realistic image of Joe Biden eating mud. He is open mouth, Joe Biden is clearly eating mud Photography, RTX light, very detailed, 8K, realistic light, he brings the mud in his mouth with his hands. There is mud on joe biden but the rest of the picture is clean
Barleysack/AERoberta2
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/Gerbil-A-6.7m | 6.7m | A-Class | 20 | 134M | 131k | 6.074100 |
BatuhanYilmaz/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### my_wife_testing Dreambooth model trained by CrystalTea with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Bee-Garbs/DialoGPT-cartman-small
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - pt metrics: - perplexity pipeline_tag: text-generation --- # Model Card for Model ID A Portuguese language model trained on https://huggingface.co/facebook/opt-125m. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** Monique Monteiro - **Shared by [optional]:** Monique Monteiro - **Model type:** OPT - **Language(s) (NLP):** Portuguese - **License:** [More Information Needed] - **Finetuned from model [optional]:** facebook/opt-125m Use the code below to get started with the model. ```python from transformers import pipeline generator = pipeline('text-generation', 'monilouise/opt125M_portuguese') output = generator("Era uma vez", max_length=50, do_sample=True) ``` ## Training Details ### Training Data The model was trained on gs://unicamp-dl/ia025a_2022s1/aula9/sample-1gb.txt ### Training Procedure The model was trained for 3 epochs, using learning rate = 5e-5 (linear scheduler). #### Preprocessing [optional] All text was tokenized and broken into chunks of 1024 tokens. #### Training Hyperparameters - **Training regime:** fp16 mixed precision #### Speeds, Sizes, Times [optional] Training time: 17 hours ## Evaluation The model was evaluated on a 5% validation split. #### Metrics Perplexity = 7.94. ## Model Card Authors [optional] moniquelouise@gmail.com ## Model Card Contact moniquelouise@gmail.com
BenWitter/DialoGPT-small-Tyrion
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/Gerbil-B-6.7m | 6.7m | B-Class | 42 | 281M | 131k | 5.513200 |
Benicio/t5-small-finetuned-en-to-ro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: led-base-16384-biolaysum-both-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # led-base-16384-biolaysum-both-all This model is a fine-tuned version of [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1320 - Rouge1: 0.4554 - Rouge2: 0.1583 - Rougel: 0.2462 - Rougelsum: 0.2464 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:| | 2.2477 | 0.69 | 5000 | 2.1940 | 0.4501 | 0.1552 | 0.2441 | 0.2442 | | 2.0151 | 1.37 | 10000 | 2.1320 | 0.4554 | 0.1583 | 0.2462 | 0.2464 | | 1.8991 | 2.06 | 15000 | 2.0994 | 0.4561 | 0.1561 | 0.2436 | 0.2437 | | 1.877 | 2.75 | 20000 | 2.0860 | 0.4588 | 0.1582 | 0.2452 | 0.2453 | | 1.753 | 3.43 | 25000 | 2.0723 | 0.4566 | 0.1569 | 0.2455 | 0.2457 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1 - Datasets 2.10.1 - Tokenizers 0.12.1
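A summarization sketch for the fine-tuned LED model (repo id assumed from the card's model name; LED is designed for long inputs, up to 16,384 tokens):

```python
from transformers import pipeline

# Repo id assumed from the model name in the card; add the owner namespace as needed.
summarizer = pipeline("summarization", model="led-base-16384-biolaysum-both-all")

with open("article.txt") as f:
    article = f.read()
print(summarizer(article, max_length=256, min_length=64)[0]["summary_text"])
```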
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 --- insert_at_4_nonfrozen_50rn_8heads_k=128.ckpt * Final validation scores: acc@1=0.758 acc@5=0.92 * Trained for 4 epochs ``` # lightning.pytorch==2.0.0 seed_everything: true trainer: callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss - class_path: lightning.pytorch.callbacks.LearningRateMonitor accelerator: auto strategy: auto devices: auto num_nodes: 1 precision: 16-mixed logger: null fast_dev_run: false max_epochs: 8 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 15 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 1.0 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/insert_at_4_nonfrozen_50rn_4heads_k=128 model: resnet_type: 50 is_rq: false quantizer_args: heads: 8 use_cosine_sim: false accept_image_fmap: true codebook_dim: 256 codebook_size: 128 decay: 0.95 eps: 1.0e-05 commitment_weight: 0.0 threshold_ema_dead_code: 2 resnet_insertion_index: 4 unfreeze_resnet_block_indeces: [3] unfreeze_fc: true lr: 0.00010 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 6 batch_size: 512 shuffle: true pin_memory: true drop_last: false ``` epoch0-insert-at-4-frozen-deep-norq.ckpt * trained to vall acc@5 0.887 acc@1 .6697 * big codebook size (256) * 8 heads ``` # lightning.pytorch==2.0.0 # bigger depth seed_everything: true trainer: callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss accelerator: auto strategy: auto devices: auto num_nodes: 1 precision: 16-mixed logger: null callbacks: null fast_dev_run: false max_epochs: 5 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 15 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 1.0 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/insert_at_4_frozen_deep model: resnet_type: 34 is_rq: false quantizer_args: heads: 8 use_cosine_sim: false accept_image_fmap: true codebook_dim: 128 codebook_size: 256 decay: 0.85 eps: 1.0e-05 commitment_weight: 5.0 threshold_ema_dead_code: 1 resnet_insertion_index: 4 unfreeze_resnet_block_indeces: [] unfreeze_fc: false lr: 0.0002 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 6 batch_size: 512 shuffle: true pin_memory: true drop_last: false ``` insert_at_4_nonfrozen_deep_epoch=3-step=7759.ckpt * small codebook size (64) * trained to v acc@5 .8645 acc@1 0.6554 ``` # lightning.pytorch==2.0.0 # bigger depth seed_everything: true trainer: 
callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss accelerator: auto strategy: auto devices: auto num_nodes: 1 precision: 16-mixed logger: null callbacks: null fast_dev_run: false max_epochs: 5 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 15 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 1.0 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/insert_at_4_nonfrozen_deep model: resnet_type: 34 is_rq: false quantizer_args: heads: 8 use_cosine_sim: false accept_image_fmap: true codebook_dim: 128 codebook_size: 64 decay: 0.85 eps: 1.0e-05 commitment_weight: 0.5 threshold_ema_dead_code: 1 sample_codebook_temp: 0.1 resnet_insertion_index: 4 unfreeze_resnet_block_indeces: - 2 - 3 unfreeze_fc: true lr: 0.0001 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 6 batch_size: 512 shuffle: true pin_memory: true drop_last: false ``` epoch=1-step=4503.ckpt * inserted at 3, all resnet weights frozen * ~.62 val acc ``` # lightning.pytorch==2.0.0 seed_everything: true trainer: callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss accelerator: auto strategy: auto devices: auto num_nodes: 1 precision: 16-mixed logger: null callbacks: null fast_dev_run: false max_epochs: 10 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 5 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 0.5 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/test_insert_at_3_frozen model: resnet_type: 34 is_rq: true quantizer_args: num_quantizers: 4 shared_codebook: false quantize_dropout: true accept_image_fmap: true codebook_dim: 128 codebook_size: 512 decay: 0.85 eps: 1.0e-05 commitment_weight: 25.0 threshold_ema_dead_code: 2 sample_codebook_temp: 0.05 quantize_dropout_cutoff_index: 1 quantize_dropout_multiple_of: 1 resnet_insertion_index: 3 lr: 0.0002 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 8 batch_size: 512 shuffle: true pin_memory: true drop_last: false ``` epoch2-val-63.ckpt * final val acc .63 * trained for 2 epochs * More compressed embedding space, with more dropout * git commit dc54a9bdbfcfbc83c736ac5c06ab09c5acf2d5e8 ``` # lightning.pytorch==2.0.0 seed_everything: true trainer: callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss accelerator: auto strategy: auto devices: auto 
num_nodes: 1 precision: 16-mixed logger: null callbacks: null fast_dev_run: false max_epochs: 10 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 5 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 0.5 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/insert_at_4 model: resnet_type: 34 is_rq: true quantizer_args: num_quantizers: 8 shared_codebook: true quantize_dropout: false accept_image_fmap: true codebook_dim: 128 codebook_size: 64 decay: 0.8 eps: 1.0e-05 commitment_weight: 5.0 threshold_ema_dead_code: 1 sample_codebook_temp: 0.1 resnet_insertion_index: 4 unfreeze_resnet_block_indeces: - 3 unfreeze_fc: true lr: 0.0002 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 6 batch_size: 512 shuffle: true pin_memory: true drop_last: false ``` ### epoch=5-step=14765.ckpt * trained for 5 1/2 epochs on imagenet, on top of resnet 34 * final validation accuracy: .66 * final training accuracy: 0.64 * git hash: `c4852331f9a40393b8ffd8b7b9a689d1ff6e1021` * config: ``` # lightning.pytorch==2.0.0 seed_everything: true trainer: callbacks: - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: save_last: true save_top_k: 1 monitor: v_c_loss accelerator: auto strategy: auto devices: auto num_nodes: 1 precision: 16-mixed logger: null callbacks: null fast_dev_run: false max_epochs: 10 min_epochs: null max_steps: -1 min_steps: null max_time: null limit_train_batches: null limit_val_batches: null limit_test_batches: null limit_predict_batches: null overfit_batches: 0.0 val_check_interval: 0.1 check_val_every_n_epoch: 1 num_sanity_val_steps: null log_every_n_steps: 5 enable_checkpointing: true enable_progress_bar: null enable_model_summary: null accumulate_grad_batches: 1 gradient_clip_val: 0.5 gradient_clip_algorithm: null deterministic: null benchmark: null inference_mode: true use_distributed_sampler: true profiler: null detect_anomaly: false barebones: false plugins: null sync_batchnorm: false reload_dataloaders_every_n_epochs: 0 default_root_dir: ckpt/test_insert_at_4 model: resnet_type: 34 is_rq: true quantizer_args: num_quantizers: 4 shared_codebook: true quantize_dropout: false accept_image_fmap: true codebook_dim: 128 codebook_size: 256 decay: 0.8 eps: 1.0e-05 commitment_weight: 5.0 threshold_ema_dead_code: 1 sample_codebook_temp: 0.0 resnet_insertion_index: 4 unfreeze_resnet_block_indeces: - 3 unfreeze_fc: true lr: 0.0002 data: data_dir: "/home/figes/Downloads/ILSVRC2012_CLS-LOC/" image_size: 224 num_workers: 8 batch_size: 512 shuffle: true pin_memory: true drop_last: false ```
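The checkpoint notes above are Lightning CLI config dumps, so any of the listed `.ckpt` files can in principle be restored with `load_from_checkpoint`. A minimal loading sketch follows; the `QuantizedResNet` import is an assumption, since the card never names the repository's `LightningModule` class:

```python
import torch
from model import QuantizedResNet  # hypothetical import; the card never names its LightningModule

# Lightning checkpoints bundle hyperparameters with the weights, so
# load_from_checkpoint can rebuild the module exactly as configured above.
model = QuantizedResNet.load_from_checkpoint(
    "insert_at_4_nonfrozen_50rn_8heads_k=128.ckpt", map_location="cpu"
)
model.eval()

# Sanity-check with one ImageNet-sized batch (the configs train at 224 x 224).
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)
```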
Berzemu/Coco
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy model-index: - name: LinearProbing01 results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.83152 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # LinearProbing01 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.4013 - Accuracy: 0.8315 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4682 | 1.0 | 1563 | 0.4316 | 0.8251 | | 0.4202 | 2.0 | 3126 | 0.4013 | 0.8315 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.2
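The card reports hyperparameters but no code; since the name suggests linear probing, a plausible setup is to freeze the DistilBERT body and train only the classification head with the settings listed above. The freezing step is an assumption — the card never states it explicitly:

```python
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2
)
# Assumed probing setup: freeze the pretrained encoder so only the
# pre_classifier/classifier head is updated during training.
for param in model.distilbert.parameters():
    param.requires_grad = False
# Hand `model` to a Trainer configured with the card's hyperparameters
# (lr=2e-5, batch size 16, 2 epochs, Adam, linear schedule, seed 42).
```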
BhanuSama/gpt2-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image --- # Core ML Converted Model: - This model was converted to [Core ML for use on Apple Silicon devices](https://github.com/apple/ml-stable-diffusion). Conversion instructions can be found [here](https://github.com/godly-devotion/MochiDiffusion/wiki/How-to-convert-ckpt-or-safetensors-files-to-Core-ML).<br> - Provide the model to an app such as **Mochi Diffusion** [Github](https://github.com/godly-devotion/MochiDiffusion) - [Discord](https://discord.gg/x2kartzxGv) to generate images.<br> - `split_einsum` version is compatible with all compute unit options including Neural Engine. - `original` version is only compatible with `CPU & GPU` option. - Custom resolution versions are tagged accordingly. - The `vae-ft-mse-840000-ema-pruned.ckpt` vae is embedded into the model. - This model was converted with a `vae-encoder` for `image2image`. - This model is `fp16`. - Descriptions are posted as-is from original model source. - Not all features and/or results may be available in CoreML format. - This model does not have the [unet split into chunks](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). - This model does not include a safety checker (for NSFW content). # floralPatterns-v10: Source(s): [Hugging Face](https://huggingface.co/Mobius-labs/floral_pattern) - [CivitAI](https://civitai.com/models/26295/floral-patterns) This model was trained to generate floral patterns. Join our [Discord](https://discord.gg/qM84dzKC) for any questions or feedback! **Prompting** Use the word **"pattern"** to trigger the style. You can also use negative prompt to get rid of unwanted colors if the positive prompt isn't providing enough control. Example prompts A yellow and pink pattern with flowers on white background A yellow and pink pattern with birds and flowers on white background A black and white pattern of butterflies in a jungle The example images were all generated with fairly simple prompts, but feel free to experiment with more complex prompts as well as negative prompts. **Settings** Resolution must be **768 x 768** for best results Sample images were generated with CFG = 11 and 75 inference steps. Inference settings otherwise shouldn’t matter too much. ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/a04a8d31-f055-40f6-d152-af2811715700/width=400/358250) ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/3d669764-52ec-411f-5f8a-9252f3e83900/width=400/358288) ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/549c6d86-5576-4d67-6eca-5c6cc4106b00/width=400/358251)
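For experimenting outside the Core ML apps, the original (non-Core ML) checkpoint linked above should load with `diffusers`; here is a sketch using the card's recommended settings (768 x 768, CFG 11, 75 steps) — treating the linked repo id as the model source is an assumption:

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id taken from the card's Hugging Face source link; adjust if the
# checkpoint is published elsewhere.
pipe = StableDiffusionPipeline.from_pretrained(
    "Mobius-labs/floral_pattern", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "A yellow and pink pattern with flowers on white background",
    height=768, width=768,            # the card asks for 768 x 768 for best results
    guidance_scale=11,                # CFG = 11, as used for the sample images
    num_inference_steps=75,
).images[0]
image.save("floral_pattern.png")
```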
Bharathdamu/wav2vec2-large-xls-r-300m-hindi2-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 712.50 +/- 265.04 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vijmeister -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vijmeister -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga vijmeister ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Bharathdamu/wav2vec2-model-hindi-stt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: bigscience-openrail-m base_model: riffusion/riffusion-model-v1 datasets: - rxk/MC_caption language: - en tags: - riffusion ---
Bia18/Beatriz
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: CIS6930_DAAGR_T5_Emo results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # CIS6930_DAAGR_T5_Emo This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3253 - Train Accuracy: 0.9647 - Validation Loss: 0.4468 - Validation Accuracy: 0.9495 - Epoch: 19 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.4976 | 0.9412 | 0.4567 | 0.9459 | 0 | | 0.4359 | 0.9482 | 0.4462 | 0.9474 | 1 | | 0.4228 | 0.9502 | 0.4406 | 0.9484 | 2 | | 0.4131 | 0.9517 | 0.4370 | 0.9488 | 3 | | 0.4050 | 0.9528 | 0.4349 | 0.9493 | 4 | | 0.3981 | 0.9539 | 0.4335 | 0.9496 | 5 | | 0.3914 | 0.9548 | 0.4327 | 0.9498 | 6 | | 0.3851 | 0.9558 | 0.4328 | 0.9500 | 7 | | 0.3794 | 0.9565 | 0.4328 | 0.9501 | 8 | | 0.3738 | 0.9574 | 0.4321 | 0.9502 | 9 | | 0.3685 | 0.9582 | 0.4328 | 0.9502 | 10 | | 0.3632 | 0.9589 | 0.4340 | 0.9502 | 11 | | 0.3582 | 0.9597 | 0.4343 | 0.9501 | 12 | | 0.3531 | 0.9605 | 0.4363 | 0.9501 | 13 | | 0.3482 | 0.9612 | 0.4381 | 0.9501 | 14 | | 0.3436 | 0.9619 | 0.4390 | 0.9500 | 15 | | 0.3391 | 0.9626 | 0.4396 | 0.9500 | 16 | | 0.3340 | 0.9633 | 0.4438 | 0.9499 | 17 | | 0.3297 | 0.9640 | 0.4454 | 0.9498 | 18 | | 0.3253 | 0.9647 | 0.4468 | 0.9495 | 19 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.11.0 - Datasets 2.11.0 - Tokenizers 0.13.2
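The optimizer dictionary above is a stock Keras Adam; the sketch below shows how those values map onto `tf.keras` (the dataset pipeline and exact task head are not described in the card):

```python
import tensorflow as tf
from transformers import TFT5ForConditionalGeneration

model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
# Mirrors the reported optimizer config: Adam(lr=1e-3, beta_1=0.9,
# beta_2=0.999, epsilon=1e-7, amsgrad=False), float32 training.
optimizer = tf.keras.optimizers.Adam(
    learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False
)
model.compile(optimizer=optimizer)  # the model supplies its own LM loss when labels are passed
```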
Biasface/DDDC
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/Gerbil-A-3.3m | 3.3m | A-Class | 20 | 60M | 65.5k | 6.664400 |
Biasface/DDDC2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: david3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 17.20 +/- 5.10 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
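The course linked above defines its own `Policy` class and push/load helpers rather than a packaged API, so loading this agent is repo-specific. A rough evaluation sketch follows, assuming the repo stores the pickled policy as `model.pt` and that the policy exposes the course's `act()` method — both assumptions:

```python
import gym
import torch
from huggingface_hub import hf_hub_download

# Assumed repo id and filename; the card states neither.
path = hf_hub_download(repo_id="<user>/david3", filename="model.pt")
policy = torch.load(path, map_location="cpu")
policy.eval()

# Classic gym API assumed; gymnasium returns (obs, info) from reset and a
# 5-tuple from step instead.
env = gym.make("CartPole-v1")
state = env.reset()
done, episode_return = False, 0.0
while not done:
    action, _ = policy.act(state)  # the course's Policy returns (action, log_prob)
    state, reward, done, _ = env.step(action)
    episode_return += reward
print(f"episode return: {episode_return}")
```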
BigSalmon/BertaMyWorda
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 --- Our repository [flan-alpaca-lora](https://github.com/Reason-Wang/flan-alpaca-lora) contains the details to train flan-t5 with [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) instructions and [low-rank adaptation](https://arxiv.org/abs/2106.09685). You can use the model easily with the following code: ```python import transformers from peft import PeftModel model_name = "google/flan-t5-large" peft_model_id = "reasonwang/flan-alpaca-lora-large" tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) base_model = transformers.AutoModelForSeq2SeqLM.from_pretrained(model_name) peft_model = PeftModel.from_pretrained(base_model, peft_model_id) inputs = tokenizer("List a few tips to get good scores in math.", return_tensors="pt") outputs = peft_model.generate(**inputs, max_length=128, do_sample=True) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ```
BigSalmon/BestMask2
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: david4 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 121.70 +/- 5.80 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BigSalmon/BlankSlots
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
null
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: en datasets: - lmqg/qg_squad pipeline_tag: text2text-generation tags: - question generation widget: - text: "<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." example_title: "Question Generation Example 1" - text: "Beyonce further expanded her acting career, starring as blues singer <hl> Etta James <hl> in the 2008 musical biopic, Cadillac Records." example_title: "Question Generation Example 2" - text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, <hl> Cadillac Records <hl> ." example_title: "Question Generation Example 3" model-index: - name: vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_squad type: default args: default metrics: - name: BLEU4 (Question Generation) type: bleu4_question_generation value: 22.43 - name: ROUGE-L (Question Generation) type: rouge_l_question_generation value: 49.7 - name: METEOR (Question Generation) type: meteor_question_generation value: 24.33 - name: BERTScore (Question Generation) type: bertscore_question_generation value: 89.95 - name: MoverScore (Question Generation) type: moverscore_question_generation value: 63.14 --- # Model Card of `vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg` This model is a fine-tuned version of [ckpts/mt5-small-trimmed-en-15000](https://huggingface.co/ckpts/mt5-small-trimmed-en-15000) for the question generation task on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). 
### Overview - **Language model:** [ckpts/mt5-small-trimmed-en-15000](https://huggingface.co/ckpts/mt5-small-trimmed-en-15000) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg") # model prediction questions = model.generate_q(list_context="William Turner was an English painter who specialised in watercolour landscapes", list_answer="William Turner") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg") output = pipe("<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:---------------------------------------------------------------| | BERTScore | 89.95 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 54.49 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 38.22 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 28.87 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 22.43 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 24.33 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 63.14 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 49.7 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: paragraph_answer - output_types: question - prefix_types: None - model: ckpts/mt5-small-trimmed-en-15000 - max_length: 512 - max_length_output: 32 - epoch: 13 - batch: 16 - lr: 0.001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 4 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-15000-squad-qg/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
BigSalmon/FormalBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy model-index: - name: FineTuning01 results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.93028 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # FineTuning01 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2349 - Accuracy: 0.9303 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2333 | 1.0 | 1563 | 0.1971 | 0.9241 | | 0.1537 | 2.0 | 3126 | 0.2349 | 0.9303 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.2
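Once the fine-tuned checkpoint is published, querying it is a one-liner with `pipeline`; the repo id below is a placeholder, since the card never names a hosted location:

```python
from transformers import pipeline

# Placeholder repo id — substitute wherever the FineTuning01 checkpoint lives.
clf = pipeline("text-classification", model="<user>/FineTuning01")
print(clf("A surprisingly moving film with a terrific lead performance."))
# e.g. [{'label': 'LABEL_1', 'score': 0.98}]; label names follow the fine-tune's config
```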
BigSalmon/FormalBerta3
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: cc-by-sa-4.0 datasets: - mc4 language: - ja - en tags: - t5 - text2text-generation - seq2seq --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This is a small T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese and English corpora. This model card aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details * Japanese dump data of [Wikipedia](https://ja.wikipedia.org) (as of July 6, 2020) * The Japanese portion of [OSCAR](https://oscar-corpus.com) * The Japanese portion of [CC-100](http://data.statmt.org/cc-100/) This model has only been pretrained; it needs to be fine-tuned before it can be used for a specific task. Like any language model trained on a large corpus, this model potentially produces skewed (unethical, harmful, or biased) outputs that stem from biases in the content of its training data. Please keep in mind that this problem can occur, and take care to use the model only for applications where no harm can result. The full Wikipedia data above was used to train the SentencePiece tokenizer. ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. 
--> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
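The card's quick-start section is unfilled; for a pretraining-only T5 checkpoint like this one, a generic loading sketch looks as follows (the repo id is a placeholder, and per the training notes the model should be fine-tuned before downstream use):

```python
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Placeholder repo id — the card does not name the published checkpoint.
tokenizer = T5Tokenizer.from_pretrained("<repo-id>")
model = T5ForConditionalGeneration.from_pretrained("<repo-id>")
# Pretraining-only weights: wrap this in a fine-tuning loop (e.g. Trainer)
# for your downstream task before relying on its generations.
```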
BigSalmon/FormalRobertaaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: h_size_2 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 462.00 +/- 114.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BigSalmon/GPT2HardArticleEasyArticle
[ "pytorch", "jax", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/Gerbil-D-6.7m | 6.7m | D-Class | 142 | 951M | 131k | 4.8186 |
BigSalmon/GPTNeo350MInformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - xiaoql/autotrain-data-dialogsum4 co2_eq_emissions: emissions: 10.855487054586101 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 45048113162 - CO2 Emissions (in grams): 10.8555 ## Validation Metrics - Loss: 0.959 - Rouge1: 82.819 - Rouge2: 65.516 - RougeL: 82.525 - RougeLsum: 82.825 - Gen Len: 131.487 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/xiaoql/autotrain-dialogsum4-45048113162 ```
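The cURL call above translates directly to Python with `requests`, which can be handier for batching; same endpoint and header, with your own API token:

```python
import requests

API_URL = "https://api-inference.huggingface.co/xiaoql/autotrain-dialogsum4-45048113162"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}

response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```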
BigSalmon/InformalToFormalLincoln17
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: CIS6930_DAAGR_T5_NoEmo results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # CIS6930_DAAGR_T5_NoEmo This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3368 - Train Accuracy: 0.9629 - Validation Loss: 0.4438 - Validation Accuracy: 0.9496 - Epoch: 17 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.5062 | 0.9405 | 0.4590 | 0.9454 | 0 | | 0.4381 | 0.9479 | 0.4477 | 0.9472 | 1 | | 0.4249 | 0.9499 | 0.4423 | 0.9481 | 2 | | 0.4152 | 0.9513 | 0.4386 | 0.9486 | 3 | | 0.4071 | 0.9525 | 0.4365 | 0.9490 | 4 | | 0.4000 | 0.9535 | 0.4349 | 0.9493 | 5 | | 0.3935 | 0.9545 | 0.4338 | 0.9496 | 6 | | 0.3876 | 0.9553 | 0.4337 | 0.9498 | 7 | | 0.3816 | 0.9562 | 0.4338 | 0.9498 | 8 | | 0.3763 | 0.9571 | 0.4343 | 0.9499 | 9 | | 0.3708 | 0.9578 | 0.4338 | 0.9500 | 10 | | 0.3657 | 0.9586 | 0.4357 | 0.9498 | 11 | | 0.3605 | 0.9593 | 0.4355 | 0.9500 | 12 | | 0.3556 | 0.9601 | 0.4370 | 0.9499 | 13 | | 0.3507 | 0.9608 | 0.4380 | 0.9499 | 14 | | 0.3463 | 0.9615 | 0.4397 | 0.9498 | 15 | | 0.3413 | 0.9622 | 0.4427 | 0.9496 | 16 | | 0.3368 | 0.9629 | 0.4438 | 0.9496 | 17 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.11.0 - Datasets 2.11.0 - Tokenizers 0.13.2
BigSalmon/InformalToFormalLincoln25
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
Access to model satwikapaul/aicamp_nlpcrashcourse is restricted and you are not in the authorized list. Visit https://huggingface.co/satwikapaul/aicamp_nlpcrashcourse to ask for access.
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: gamma_0_05_fail results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 9.20 +/- 0.60 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Blazeolmo/Scrabunzi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T03:31:11Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 761.72 +/- 57.34 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
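The usage section above is left as a TODO; with the two libraries it names, a minimal loading sketch looks like this — the repo id and zip filename are assumptions, since the TODO specifies neither:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Assumed artifact naming; substitute the actual repo id and filename.
checkpoint = load_from_hub(
    repo_id="<user>/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)
```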
BritishLibraryLabs/bl-books-genre
[ "pytorch", "distilbert", "text-classification", "multilingual", "dataset:blbooksgenre", "transformers", "genre", "books", "library", "historic", "glam ", "lam", "license:mit", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76
null
--- license: wtfpl language: - en pipeline_tag: text-generation tags: - llama library_name: adapter-transformers --- # Alpaca Native Enhanced 7B model download for Alpaca.cpp, Llama.cpp, and Dalai Use this command to run with llama.cpp ```sh main -m models/ANE-7B/ggml-model-q4_1.bin -n -1 --ctx_size 2048 --batch_size 16 --keep 512 --repeat_penalty 1.0 -t 16 --temp 0.4 --top_k 30 --top_p 0.18 --interactive-first -ins --color -i -r "User:" -f prompts/alpacanativeenhanced.txt ``` contents of `prompts/alpacanativeenhanced.txt` should be ```txt You are an AI language model designed to assist the User by answering their questions, offering advice, and engaging in casual conversation in a friendly, helpful, and informative manner. You respond clearly, coherently, and you consider the conversation history. User: Hey, how's it going? Assistant: Hey there! I'm doing great, thank you. What can I help you with today? Let's have a fun chat! ``` Original model https://huggingface.co/8bit-coder/alpaca-7b-nativeEnhanced
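If you'd rather drive the same GGML file from Python instead of the `main` binary, `llama-cpp-python` exposes equivalent knobs; below is a sketch mirroring the sampling flags above (whether this exact quantization loads depends on your build matching the file's GGML format):

```python
from llama_cpp import Llama

llm = Llama(model_path="models/ANE-7B/ggml-model-q4_1.bin", n_ctx=2048)

# Prepend the system prompt from prompts/alpacanativeenhanced.txt, then converse.
out = llm(
    "User: Hey, how's it going?\nAssistant:",
    temperature=0.4, top_k=30, top_p=0.18, repeat_penalty=1.0,
    max_tokens=256, stop=["User:"],
)
print(out["choices"][0]["text"])
```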
Broadus20/DialoGPT-small-joshua
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-03-30T04:10:01Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 708.50 +/- 262.60 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Hourai -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Hourai -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Hourai ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Bubb-les/DisloGPT-medium-HarryPotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-30T04:22:59Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
model = load_from_hub(repo_id="matank/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CAMeL-Lab/bert-base-arabic-camelbert-ca-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
2023-03-30T04:51:43Z
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: uyenvuong/super-cool-model 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
---
title: Aitrial
emoji: 🚀
colorFrom: red
colorTo: gray
sdk: gradio
sdk_version: 3.19.1
app_file: app.py
pinned: false
---
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,862
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/GerbilBlender-A-3.3m | 3.3m | A-Class | 20 | 60M | 65.5k | 6.7417 | "Blender" models, inspired by UL2 pretraining, are trained equally in fill-in-the-middle, causal modelling, and masked language modelling tasks. Special tokens for these models include: ``` '<fitm_start>', '<multiple_tok_mask>', '<fitm_result>', '<causal>', '<mlm_start>', '<single_tok_mask>', '<mlm_end>' # Example fill in the middle '<fitm_start> this is an <multiple_tok_mask> for fill-in-the-middle <fitm_result> example text <|endoftext|>' # Example causal language modelling '<causal> this is an example text for causal language modelling <|endoftext|>' # Example masked language modelling '<mlm_start> this is an <single_tok_mask> text for masked language modelling <mlm_end> example <|endoftext|>' ```
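A minimal generation sketch with `transformers`, assuming the checkpoint loads through the generic causal-LM auto classes (the prompt format follows the special tokens documented above):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Loading via the generic auto classes is an assumption about how the
# checkpoint was exported; the repo id comes from the table above.
model_id = "GerbilLab/GerbilBlender-A-3.3m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Causal-modelling mode, using the special tokens documented above.
prompt = "<causal> this is an example text"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0]))
```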
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-madar-twitter5
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
75
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: jamesimmanuel/SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | GerbilLab/Gerbil-D-3.3m | 3.3m | D-Class | 142 | 426M | 65.5k | 5.3307 |
CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: my_awesome_billsum_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_billsum_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1538 - Rouge1: 0.1789 - Rouge2: 0.1075 - Rougel: 0.1585 - Rougelsum: 0.1584 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.2372 | 1.0 | 1635 | 0.1729 | 0.1764 | 0.1032 | 0.1554 | 0.1553 | 19.0 | | 0.2077 | 2.0 | 3270 | 0.1602 | 0.1774 | 0.1054 | 0.1569 | 0.1567 | 19.0 | | 0.197 | 3.0 | 4905 | 0.1550 | 0.1788 | 0.1073 | 0.1584 | 0.1583 | 19.0 | | 0.1924 | 4.0 | 6540 | 0.1538 | 0.1789 | 0.1075 | 0.1585 | 0.1584 | 19.0 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
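A minimal inference sketch (the model path below is the local training output directory named above; substitute a Hub repo id if the checkpoint was pushed):

```python
from transformers import pipeline

# "my_awesome_billsum_model" is the local output directory from training.
summarizer = pipeline("summarization", model="my_awesome_billsum_model")

text = "The people of the State of California do enact as follows: ..."
print(summarizer(text)[0]["summary_text"])
```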
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: Prgrg/ja-en-dataset-v3.0-subset-v3.0 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Prgrg/ja-en-dataset-v3.0-subset-v3.0 This model is a fine-tuned version of [Prgrg/ja-en-dataset-v3.0-subset-v2.0](https://huggingface.co/Prgrg/ja-en-dataset-v3.0-subset-v2.0) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.1261 - Validation Loss: 1.1683 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': 0.0, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'LinearWarmupCosineSchedule', 'config': {'initial_learning_rate': 0.0001, 'num_warmup_steps': 28181, 'num_training_steps': 281814}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 1.1261 | 1.1683 | 0 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
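A minimal TensorFlow inference sketch, assuming the checkpoint loads through the generic seq2seq auto class:

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

model_id = "Prgrg/ja-en-dataset-v3.0-subset-v3.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("こんにちは、世界。", return_tensors="tf")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```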
CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
574
2023-03-30T05:47:39Z
---
model-index:
- name: twitter-roberta-base-hate-latest
  results: []
pipeline_tag: text-classification
---

# cardiffnlp/twitter-roberta-base-hate-latest

This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-2022-154m](https://huggingface.co/cardiffnlp/twitter-roberta-base-2022-154m) for binary hate-speech classification. A combination of 13 different hate-speech datasets in the English language was used to fine-tune the model.

## The following metrics are achieved

| **Dataset** | **Accuracy** | **Macro-F1** | **Weighted-F1** |
|---|:---:|:---:|:---:|
| hatEval, SemEval-2019 Task 5: Multilingual Detection of Hate Speech Against Immigrants and Women in Twitter | 0.5848 | 0.5657 | 0.5514 |
| ucberkeley-dlab/measuring-hate-speech | 0.8706 | 0.8531 | 0.8701 |
| Detecting East Asian Prejudice on Social Media | 0.9276 | 0.8935 | 0.9273 |
| Call me sexist, but | 0.9033 | 0.6288 | 0.8852 |
| Predicting the Type and Target of Offensive Posts in Social Media | 0.9075 | 0.5984 | 0.8935 |
| HateXplain | 0.9594 | 0.8024 | 0.9600 |
| Large Scale Crowdsourcing and Characterization of Twitter Abusive Behavior | 0.6817 | 0.5939 | 0.6233 |
| Twitter Sentiment Analysis | 0.9808 | 0.9258 | 0.9807 |
| Overview of the HASOC track at FIRE 2019: Hate Speech and Offensive Content Identification in Indo-European Languages | 0.8665 | 0.5562 | 0.8343 |
| Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter | 0.9465 | 0.8557 | 0.9440 |
| Automated Hate Speech Detection and the Problem of Offensive Language | 0.9116 | 0.8797 | 0.9100 |
| Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter | 0.8378 | 0.8338 | 0.8385 |
| Multilingual and Multi-Aspect Hate Speech Analysis | 0.9655 | 0.4912 | 0.9824 |
| **Overall** | **0.8827** | **0.8383** | **0.8842** |

### Usage

Install tweetnlp via pip.
```shell
pip install tweetnlp
```

Load the model in Python.
```python
import tweetnlp

model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-hate-latest")
model.predict('I love everybody :)')
>> {'label': 'NOT-HATE'}
```
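An equivalent sketch with plain `transformers`, for setups where `tweetnlp` is not available (the labels come from the checkpoint's own config):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="cardiffnlp/twitter-roberta-base-hate-latest",
)
print(classifier("I love everybody :)"))
# e.g. [{'label': 'NOT-HATE', 'score': ...}]
```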
CAUKiel/JavaBERT
[ "pytorch", "safetensors", "bert", "fill-mask", "code", "arxiv:2110.10404", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388
2023-03-30T05:54:34Z
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: omarcevi/ppo-Pyramids_Training 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CL/safe-math-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T05:55:41Z
--- tags: - Abe - Shinzo - AbeShinzo - Former Japanese Prime Minister language: - ja ---
CLAck/en-km
[ "pytorch", "marian", "text2text-generation", "transformers", "translation", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-03-30T05:55:45Z
---
model-index:
- name: twitter-roberta-base-hate-multiclass-latest
  results: []
pipeline_tag: text-classification
---

# cardiffnlp/twitter-roberta-base-hate-multiclass-latest

This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-2022-154m](https://huggingface.co/cardiffnlp/twitter-roberta-base-2022-154m) for multiclass hate-speech classification. A combination of 13 different hate-speech datasets in the English language was used to fine-tune the model.

## Classes available
```
{'racism': 0, 'sexism': 1, 'religion': 2, 'sexual_orientation': 3, 'disability': 4, 'origin': 5, 'not_hate': 6}
```

## The following metrics are achieved

* Accuracy: 0.9419
* Macro-F1: 0.5752
* Weighted-F1: 0.9390

### Usage

Install tweetnlp via pip.
```shell
pip install tweetnlp
```

Load the model in Python.
```python
import tweetnlp

model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-hate-multiclass-latest")
model.predict('Women are trash 2.')
>> {'label': 'sexism'}
model.predict('@user dear mongoloid respect sentiments & belief refrain totalitarianism. @user')
>> {'label': 'disability'}
```
CLAck/indo-mixed
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2023-03-30T05:58:26Z
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: en
datasets:
- lmqg/qg_squad
pipeline_tag: text2text-generation
tags:
- question generation
widget:
- text: "<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 1"
- text: "Beyonce further expanded her acting career, starring as blues singer <hl> Etta James <hl> in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 2"
- text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, <hl> Cadillac Records <hl> ."
  example_title: "Question Generation Example 3"
model-index:
- name: vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg
  results:
  - task:
      name: Text2text Generation
      type: text2text-generation
    dataset:
      name: lmqg/qg_squad
      type: default
      args: default
    metrics:
    - name: BLEU4 (Question Generation)
      type: bleu4_question_generation
      value: 22.57
    - name: ROUGE-L (Question Generation)
      type: rouge_l_question_generation
      value: 49.67
    - name: METEOR (Question Generation)
      type: meteor_question_generation
      value: 24.39
    - name: BERTScore (Question Generation)
      type: bertscore_question_generation
      value: 89.92
    - name: MoverScore (Question Generation)
      type: moverscore_question_generation
      value: 63.17
---

# Model Card of `vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg`
This model is a fine-tuned version of [ckpts/mt5-small-trimmed-en-10000](https://huggingface.co/ckpts/mt5-small-trimmed-en-10000) for the question generation task on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview - **Language model:** [ckpts/mt5-small-trimmed-en-10000](https://huggingface.co/ckpts/mt5-small-trimmed-en-10000) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg") # model prediction questions = model.generate_q(list_context="William Turner was an English painter who specialised in watercolour landscapes", list_answer="William Turner") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg") output = pipe("<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:---------------------------------------------------------------| | BERTScore | 89.92 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 54.56 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 38.4 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 29.05 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 22.57 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 24.39 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 63.17 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 49.67 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: paragraph_answer - output_types: question - prefix_types: None - model: ckpts/mt5-small-trimmed-en-10000 - max_length: 512 - max_length_output: 32 - epoch: 13 - batch: 16 - lr: 0.001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 4 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-10000-squad-qg/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
CLAck/vi-en
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### Please use the prompt: comic style side face portrait of davide-comic-book-characters

Dreambooth model trained by Alexwww with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook.

This AI was trained on @puppopages' (Instagram) character illustration work.

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)

Sample pictures of this concept:

![0](https://huggingface.co/Alexwww/davide-comic-book-characters/resolve/main/sample_images/Screenshot_2023-03-29_at_11.01.19_PM.png)
CLS/WubiBERT_models
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T06:11:30Z
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss | | --- | --- | --- | --- | --- | --- | --- | | [GerbilLab/GerbilBlender-A-6.7m](https://hf.co/GerbilLab/GerbilBlender-A-6.7m) | 6.7m | A-Class | 20 | 134M | 131k | 6.0908 | "Blender" models, inspired by UL2 pretraining, are trained equally in fill-in-the-middle, causal modelling, and masked language modelling tasks. Special tokens for these models include: ``` '<fitm_start>', '<multiple_tok_mask>', '<fitm_result>', '<causal>', '<mlm_start>', '<single_tok_mask>', '<mlm_end>' # Example fill in the middle '<fitm_start> this is an <multiple_tok_mask> for fill-in-the-middle <fitm_result> example text <|endoftext|>' # Example causal language modelling '<causal> this is an example text for causal language modelling <|endoftext|>' # Example masked language modelling '<mlm_start> this is an <single_tok_mask> text for masked language modelling <mlm_end> example <|endoftext|>' ```
CLTL/MedRoBERTa.nl
[ "pytorch", "roberta", "fill-mask", "nl", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,988
null
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: en
datasets:
- lmqg/qg_squad
pipeline_tag: text2text-generation
tags:
- question generation
widget:
- text: "<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 1"
- text: "Beyonce further expanded her acting career, starring as blues singer <hl> Etta James <hl> in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 2"
- text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, <hl> Cadillac Records <hl> ."
  example_title: "Question Generation Example 3"
model-index:
- name: vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg
  results:
  - task:
      name: Text2text Generation
      type: text2text-generation
    dataset:
      name: lmqg/qg_squad
      type: default
      args: default
    metrics:
    - name: BLEU4 (Question Generation)
      type: bleu4_question_generation
      value: 22.84
    - name: ROUGE-L (Question Generation)
      type: rouge_l_question_generation
      value: 50.34
    - name: METEOR (Question Generation)
      type: meteor_question_generation
      value: 24.75
    - name: BERTScore (Question Generation)
      type: bertscore_question_generation
      value: 90.01
    - name: MoverScore (Question Generation)
      type: moverscore_question_generation
      value: 63.43
---

# Model Card of `vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg`
This model is a fine-tuned version of [ckpts/mt5-small-trimmed-en-5000](https://huggingface.co/ckpts/mt5-small-trimmed-en-5000) for the question generation task on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview - **Language model:** [ckpts/mt5-small-trimmed-en-5000](https://huggingface.co/ckpts/mt5-small-trimmed-en-5000) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg") # model prediction questions = model.generate_q(list_context="William Turner was an English painter who specialised in watercolour landscapes", list_answer="William Turner") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg") output = pipe("<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:---------------------------------------------------------------| | BERTScore | 90.01 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 55.13 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 38.84 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 29.4 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 22.84 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 24.75 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 63.43 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 50.34 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: paragraph_answer - output_types: question - prefix_types: None - model: ckpts/mt5-small-trimmed-en-5000 - max_length: 512 - max_length_output: 32 - epoch: 15 - batch: 16 - lr: 0.001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 4 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-5000-squad-qg/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
2023-03-30T06:13:30Z
--- tags: - autotrain - translation language: - unk - unk datasets: - zhaozh/autotrain-data-further-train-chatdoctor co2_eq_emissions: emissions: 0.6371864100333517 --- # Model Trained Using AutoTrain - Problem type: Translation - Model ID: 45099113239 - CO2 Emissions (in grams): 0.6372 ## Validation Metrics - Loss: 1.468 - SacreBLEU: 27.918 - Gen len: 74.588
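A minimal inference sketch; the repo id below is only a guess assembled from the AutoTrain project name and model ID above, so adjust it to the actual repository:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical repo id built from the AutoTrain project name and model ID.
model_id = "zhaozh/autotrain-further-train-chatdoctor-45099113239"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("I have a headache and a mild fever.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```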
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep7_lr5_v1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep7_lr5_v1 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2884 - Precision: 0.6752 - Recall: 0.6436 - F1: 0.6590 - Accuracy: 0.9415 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-09 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.2972 | 0.6754 | 0.6363 | 0.6553 | 0.9413 | | 0.2981 | 2.0 | 934 | 0.2939 | 0.6742 | 0.6387 | 0.6560 | 0.9413 | | 0.2882 | 3.0 | 1401 | 0.2915 | 0.6747 | 0.6409 | 0.6574 | 0.9414 | | 0.2913 | 4.0 | 1868 | 0.2898 | 0.6744 | 0.6422 | 0.6579 | 0.9414 | | 0.2852 | 5.0 | 2335 | 0.2889 | 0.6741 | 0.6425 | 0.6579 | 0.9415 | | 0.2883 | 6.0 | 2802 | 0.2885 | 0.6752 | 0.6436 | 0.6590 | 0.9415 | | 0.2819 | 7.0 | 3269 | 0.2884 | 0.6752 | 0.6436 | 0.6590 | 0.9415 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
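A minimal inference sketch for the fine-tuned tagger (the model path is the local training output directory named above; the Spanish example reflects the Spanish EHR base model):

```python
from transformers import pipeline

# "BERT_ep7_lr5_v1" is the local output directory from training; replace
# with a Hub repo id if the model was pushed.
ner = pipeline(
    "token-classification",
    model="BERT_ep7_lr5_v1",
    aggregation_strategy="simple",
)
print(ner("La paciente presenta fiebre y cefalea desde hace tres días."))
```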
CLTL/icf-levels-ins
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="sudeepsharma/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
CLTL/icf-levels-stm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
---
language:
- am
tags:
- speech
- audio
- audio-classification
- hubert
pipeline_tag: audio-classification
---

```python
from transformers import AutoConfig, Wav2Vec2FeatureExtractor

# HubertForSpeechClassification is a custom classification head shipped with
# this model (it is not part of the transformers library), so import it from
# wherever that class is defined in the accompanying code.

model_name_or_path = "quaja/hubert-base-amharic-speech-emotion-recognition"
config = AutoConfig.from_pretrained(model_name_or_path)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name_or_path)
sampling_rate = feature_extractor.sampling_rate
model = HubertForSpeechClassification.from_pretrained(model_name_or_path)
```
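Continuing from the loading code above, a hedged inference sketch (`example.wav` is a placeholder audio file; the label names come from `config.id2label`):

```python
import librosa
import torch
import torch.nn.functional as F

# Load a clip and resample it to the rate the feature extractor expects.
speech, _ = librosa.load("example.wav", sr=sampling_rate)
inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

probs = F.softmax(logits, dim=-1)[0]
print(config.id2label[int(probs.argmax())])
```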
CM-CA/Cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T06:27:00Z
--- tags: - generated_from_trainer datasets: - afrispeech-200 model-index: - name: whisper-small-hi-2400_500_120 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-hi-2400_500_120 This model is a fine-tuned version of [saif-daoud/whisper-small-hi-2400_500_103](https://huggingface.co/saif-daoud/whisper-small-hi-2400_500_103) on the afrispeech-200 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 200 - training_steps: 300 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
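A minimal inference sketch; the repo id below is inferred from the model name and the parent checkpoint's namespace, so adjust it if the model lives elsewhere:

```python
from transformers import pipeline

# Repo id is an assumption based on the naming of the parent checkpoint.
asr = pipeline(
    "automatic-speech-recognition",
    model="saif-daoud/whisper-small-hi-2400_500_120",
)
print(asr("sample_clip.wav")["text"])
```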
CSZay/bart
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: en
datasets:
- lmqg/qg_squad
pipeline_tag: text2text-generation
tags:
- question generation
widget:
- text: "<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 1"
- text: "Beyonce further expanded her acting career, starring as blues singer <hl> Etta James <hl> in the 2008 musical biopic, Cadillac Records."
  example_title: "Question Generation Example 2"
- text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, <hl> Cadillac Records <hl> ."
  example_title: "Question Generation Example 3"
model-index:
- name: vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg
  results:
  - task:
      name: Text2text Generation
      type: text2text-generation
    dataset:
      name: lmqg/qg_squad
      type: default
      args: default
    metrics:
    - name: BLEU4 (Question Generation)
      type: bleu4_question_generation
      value: 21.36
    - name: ROUGE-L (Question Generation)
      type: rouge_l_question_generation
      value: 48.53
    - name: METEOR (Question Generation)
      type: meteor_question_generation
      value: 23.58
    - name: BERTScore (Question Generation)
      type: bertscore_question_generation
      value: 89.82
    - name: MoverScore (Question Generation)
      type: moverscore_question_generation
      value: 62.55
---

# Model Card of `vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg`
This model is a fine-tuned version of [ckpts/mt5-small-trimmed-en-120000](https://huggingface.co/ckpts/mt5-small-trimmed-en-120000) for the question generation task on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview - **Language model:** [ckpts/mt5-small-trimmed-en-120000](https://huggingface.co/ckpts/mt5-small-trimmed-en-120000) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg") # model prediction questions = model.generate_q(list_context="William Turner was an English painter who specialised in watercolour landscapes", list_answer="William Turner") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg") output = pipe("<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:---------------------------------------------------------------| | BERTScore | 89.82 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 53.3 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 36.91 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 27.65 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 21.36 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 23.58 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 62.55 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 48.53 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: paragraph_answer - output_types: question - prefix_types: None - model: ckpts/mt5-small-trimmed-en-120000 - max_length: 512 - max_length_output: 32 - epoch: 5 - batch: 16 - lr: 0.001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 4 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-120000-squad-qg/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Calamarii/calamari
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - deepset/germanquad - mlqa - xquad language: - de pipeline_tag: question-answering ---
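A minimal extractive-QA sketch matching the declared pipeline tag (the repo id is a placeholder; substitute this checkpoint's actual id):

```python
from transformers import pipeline

# "<repo-id>" is a placeholder for this model's actual Hub id.
qa = pipeline("question-answering", model="<repo-id>")
result = qa(
    question="Wer hat GermanQuAD entwickelt?",
    context="GermanQuAD ist ein deutscher Frage-Antwort-Datensatz, der von deepset erstellt wurde.",
)
print(result["answer"])
```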
Cameron/BERT-SBIC-targetcategory
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: Galeros/ppo-SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Cameron/BERT-jigsaw-severetoxic
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
Access to model basimbashir/gpt-j-6B-8bit is restricted and you are not in the authorized list. Visit https://huggingface.co/basimbashir/gpt-j-6B-8bit to ask for access.
Camzure/MaamiBot-test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep6_lr1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep6_lr1 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1352 - Precision: 0.8654 - Recall: 0.8689 - F1: 0.8671 - Accuracy: 0.9759 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.1009 | 0.7821 | 0.8764 | 0.8266 | 0.9665 | | 0.1009 | 2.0 | 934 | 0.0870 | 0.8476 | 0.8643 | 0.8559 | 0.9740 | | 0.0521 | 3.0 | 1401 | 0.1020 | 0.8789 | 0.8497 | 0.8641 | 0.9746 | | 0.0327 | 4.0 | 1868 | 0.1236 | 0.8707 | 0.8649 | 0.8678 | 0.9752 | | 0.0175 | 5.0 | 2335 | 0.1290 | 0.8669 | 0.8700 | 0.8685 | 0.9759 | | 0.0109 | 6.0 | 2802 | 0.1352 | 0.8654 | 0.8689 | 0.8671 | 0.9759 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Camzure/MaamiBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T06:56:21Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="halil93ibrahim/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
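The usage snippet in this card calls `load_from_hub` and `gym.make` without showing their imports; a minimal sketch of the helper it assumes, consistent with how Deep RL course Q-tables are stored as pickles on the Hub (the helper definition is an assumption, not part of the original card):

```python
import pickle

import gym  # the snippet's env creation assumes classic gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str):
    """Download a pickled model dict (q-table, env_id, ...) from the Hub."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)
```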
CasualHomie/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-03-30T07:23:46Z
--- license: gpl-3.0 tags: - DocVQA - Document Question Answering - Document Visual Question Answering datasets: - rubentito/mp-docvqa language: - en --- # LongT5 base with transient-global attention fine-tuned on MP-DocVQA This is LongT5 trained on the SQuADv2, CoQA and TryoCoQA datasets from the [Tryolabs hub](https://huggingface.co/tryolabs/long-t5-tglobal-base-blogpost-cqa-onnx), and fine-tuned on the Multipage DocVQA (MP-DocVQA) dataset. ## How to use Here is how to use this model to answer a question about a given context in PyTorch: ```python import torch from transformers import AutoTokenizer, LongT5ForConditionalGeneration tokenizer = AutoTokenizer.from_pretrained("rubentito/longt5-tglobal-base-mpdocvqa") model = LongT5ForConditionalGeneration.from_pretrained("rubentito/longt5-tglobal-base-mpdocvqa") context = "Huggingface has democratized NLP. Huge thanks to Huggingface for this." question = "What has Huggingface done?" input_text = "question: {:s}  context: {:s}".format(question, context) encoding = tokenizer(input_text, return_tensors="pt") output = model.generate(**encoding) answer = tokenizer.decode(output[0], skip_special_tokens=True) ``` ## Metrics **Average Normalized Levenshtein Similarity (ANLS)** The standard metric for text-based VQA tasks (ST-VQA and DocVQA). It evaluates the method's reasoning capabilities while smoothly penalizing OCR recognition errors. Check [Scene Text Visual Question Answering](https://arxiv.org/abs/1905.13648) for detailed information. **Answer Page Prediction Accuracy (APPA)** In the MP-DocVQA task, the models can provide the index of the page where the information required to answer the question is located. For this subtask, accuracy is used to evaluate the predictions: i.e. whether the predicted page is correct or not. Check [Hierarchical multimodal transformers for Multi-Page DocVQA](https://arxiv.org/abs/2212.05935) for detailed information. ## Model results Extended experimentation can be found in Table 2 of [Hierarchical multimodal transformers for Multi-Page DocVQA](https://arxiv.org/pdf/2212.05935.pdf). You can also check the live leaderboard at the [RRC Portal](https://rrc.cvc.uab.es/?ch=17&com=evaluation&task=4). | Model | HF name | Parameters | ANLS | APPA | |:----------------|:--------------------------------------|:-------------:|:-------------:|:---------:| | [Bert large](https://huggingface.co/rubentito/bert-large-mpdocvqa) | rubentito/bert-large-mpdocvqa | 334M | 0.4183 | 51.6177 | | [Longformer base](https://huggingface.co/rubentito/longformer-base-mpdocvqa) | rubentito/longformer-base-mpdocvqa | 148M | 0.5287 | 71.1696 | | [BigBird ITC base](https://huggingface.co/rubentito/bigbird-base-itc-mpdocvqa) | rubentito/bigbird-base-itc-mpdocvqa | 131M | 0.4929 | 67.5433 | | [LayoutLMv3 base](https://huggingface.co/rubentito/layoutlmv3-base-mpdocvqa) | rubentito/layoutlmv3-base-mpdocvqa | 125M | 0.4538 | 51.9426 | | [T5 base](https://huggingface.co/rubentito/t5-base-mpdocvqa) | rubentito/t5-base-mpdocvqa | 223M | 0.5050 | 0.0000 | | [Hi-VT5](https://huggingface.co/rubentito/hivt5-base-mpdocvqa) | rubentito/hivt5-base-mpdocvqa | 316M | 0.6201 | 79.23 | ## Citation Information ```tex @article{tito2022hierarchical, title={Hierarchical multimodal transformers for Multi-Page DocVQA}, author={Tito, Rub{\`e}n and Karatzas, Dimosthenis and Valveny, Ernest}, journal={arXiv preprint arXiv:2212.05935}, year={2022} } ```
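The ANLS description in this card translates directly into a few lines of code; a minimal sketch for a single question, assuming the standard threshold tau = 0.5 from the ST-VQA evaluation (the dataset-level score averages this over all questions):

```python
import Levenshtein  # pip install python-Levenshtein


def anls(prediction: str, gold_answers, tau: float = 0.5) -> float:
    """Normalized Levenshtein Similarity for one prediction, best over golds."""
    best = 0.0
    for gold in gold_answers:
        dist = Levenshtein.distance(prediction.lower(), gold.lower())
        nl = dist / max(len(prediction), len(gold), 1)   # normalized distance
        best = max(best, 1.0 - nl if nl < tau else 0.0)  # hard cutoff at tau
    return best
```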
Cat/Kitty
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T07:26:31Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 234.54 +/- 68.22 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
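This card leaves the usage snippet as a TODO; a minimal sketch, assuming the checkpoint was pushed with `package_to_hub` (the repo id and filename below are illustrative placeholders, not this model's actual ones):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo_id/filename: substitute the actual values for this model.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```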
Cathy/reranking_model
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2023-03-30T07:26:33Z
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 121.60 +/- 29.17 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 1000000 'learning_rate': 0.0006 'num_envs': 32 'num_steps': 1024 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.98 'num_minibatches': 32 'update_epochs': 32 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'yumingyi/lunarlander-v2-unit8' 'batch_size': 32768 'minibatch_size': 1024} ```
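The last two entries of the hyperparameter dict in this card are derived rather than chosen; CleanRL computes them from the others, and the arithmetic checks out:

```python
num_envs, num_steps, num_minibatches = 32, 1024, 32
batch_size = num_envs * num_steps                # 32 * 1024  = 32768
minibatch_size = batch_size // num_minibatches   # 32768 // 32 = 1024
```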
Cedille/fr-boris
[ "pytorch", "gptj", "text-generation", "fr", "dataset:c4", "arxiv:2202.03371", "transformers", "causal-lm", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPTJForCausalLM" ], "model_type": "gptj", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
401
2023-03-30T07:27:55Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 267.61 +/- 23.09 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
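For this card's TODO, a hedged evaluation sketch that reuses a `model` loaded as in the earlier PPO example; env creation assumes SB3 >= 2.0 with gymnasium (use `import gym` for older SB3 versions):

```python
import gymnasium as gym
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```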
dccuchile/albert-base-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
Validation dataset consists of GBH / NPR / DemocracyNow! Confusion matrix: [[1284, 692], [7539, 28555]]; scores: 0.6497975708502024, 0.791128719454757 (see the sketch below for how these numbers relate).
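The two unlabeled floats line up exactly with per-class recall computed from the printed confusion matrix, reading rows as true labels; this is an inference from the numbers, not a label given in the original text:

```python
cm = [[1284, 692], [7539, 28555]]
recall_class0 = cm[0][0] / (cm[0][0] + cm[0][1])  # 1284 / 1976   ~= 0.6497975708502024
recall_class1 = cm[1][1] / (cm[1][0] + cm[1][1])  # 28555 / 36094 ~= 0.791128719454757
```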
dccuchile/albert-base-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: balanced-augmented-roberta-gest-pred-seqeval-partialmatch-2 results: [] datasets: - Jsevisal/balanced_augmented_dataset_2 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # balanced-augmented-roberta-gest-pred-seqeval-partialmatch-2 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4965 - Precision: 0.9214 - Recall: 0.9180 - F1: 0.9135 - Accuracy: 0.9012 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 3.0873 | 1.0 | 52 | 2.5606 | 0.1508 | 0.1205 | 0.1095 | 0.3096 | | 2.2599 | 2.0 | 104 | 1.8545 | 0.3409 | 0.3827 | 0.3343 | 0.5265 | | 1.7149 | 3.0 | 156 | 1.4711 | 0.5470 | 0.5222 | 0.4715 | 0.6087 | | 1.3056 | 4.0 | 208 | 1.0879 | 0.6500 | 0.6103 | 0.5886 | 0.6919 | | 0.9978 | 5.0 | 260 | 1.0036 | 0.7039 | 0.6766 | 0.6497 | 0.7221 | | 0.7532 | 6.0 | 312 | 0.7722 | 0.7356 | 0.7552 | 0.7286 | 0.7842 | | 0.5945 | 7.0 | 364 | 0.6766 | 0.8316 | 0.7902 | 0.7790 | 0.8053 | | 0.473 | 8.0 | 416 | 0.5994 | 0.8602 | 0.8248 | 0.8224 | 0.8406 | | 0.3762 | 9.0 | 468 | 0.5572 | 0.8725 | 0.8743 | 0.8600 | 0.8593 | | 0.2943 | 10.0 | 520 | 0.5767 | 0.8893 | 0.8714 | 0.8659 | 0.8593 | | 0.251 | 11.0 | 572 | 0.5480 | 0.8892 | 0.8765 | 0.8667 | 0.8633 | | 0.2074 | 12.0 | 624 | 0.5652 | 0.8960 | 0.8866 | 0.8757 | 0.8714 | | 0.1714 | 13.0 | 676 | 0.5254 | 0.9172 | 0.9087 | 0.9019 | 0.8875 | | 0.1523 | 14.0 | 728 | 0.5788 | 0.9217 | 0.8900 | 0.8918 | 0.8790 | | 0.1309 | 15.0 | 780 | 0.5209 | 0.9205 | 0.9141 | 0.9080 | 0.8961 | | 0.1187 | 16.0 | 832 | 0.5030 | 0.9163 | 0.9138 | 0.9073 | 0.8961 | | 0.1065 | 17.0 | 884 | 0.5449 | 0.9278 | 0.9212 | 0.9153 | 0.8986 | | 0.0923 | 18.0 | 936 | 0.4965 | 0.9214 | 0.9180 | 0.9135 | 0.9012 | | 0.0894 | 19.0 | 988 | 0.5171 | 0.9236 | 0.9189 | 0.9148 | 0.9007 | | 0.0869 | 20.0 | 1040 | 0.5211 | 0.9245 | 0.9214 | 0.9159 | 0.9027 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
dccuchile/albert-base-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 265.43 +/- 20.03 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
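For this card's TODO, a hedged rollout sketch reusing a loaded `model`; the five-tuple `step` return assumes gymnasium (SB3 >= 2.0):

```python
import gymnasium as gym

env = gym.make("LunarLander-v2")
obs, info = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```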
dccuchile/albert-xxlarge-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-it results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.it split: validation args: PAN-X.it metrics: - name: F1 type: f1 value: 0.8322368421052632 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-it This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.2369 - F1: 0.8322 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.8113 | 1.0 | 70 | 0.3088 | 0.7546 | | 0.259 | 2.0 | 140 | 0.2541 | 0.8155 | | 0.1791 | 3.0 | 210 | 0.2369 | 0.8322 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
dccuchile/albert-base-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
586
2023-03-30T08:05:10Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice_11_0 metrics: - wer model-index: - name: wav2vec2-large-xlsr-53-Bangla-Common_Voice results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: common_voice_11_0 type: common_voice_11_0 config: bn split: train+validation args: bn metrics: - name: Wer type: wer value: 0.6576650727705051 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53-Bangla-Common_Voice This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.6172 - Wer: 0.6577 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 24 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 48 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.922 | 4.57 | 800 | 0.7379 | 0.8157 | | 0.5136 | 9.14 | 1600 | 0.6155 | 0.7056 | | 0.2759 | 13.71 | 2400 | 0.6172 | 0.6577 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
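This card gives no usage snippet; a hedged sketch with the ASR pipeline, where the model path is assumed to be the card's output directory (substitute the Hub id if the checkpoint was pushed):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="wav2vec2-large-xlsr-53-Bangla-Common_Voice")
print(asr("sample.wav")["text"])  # sample.wav is a placeholder audio file
```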
dccuchile/albert-large-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
75
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.en split: validation args: PAN-X.en metrics: - name: F1 type: f1 value: 0.6991051454138703 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3926 - F1: 0.6991 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.1415 | 1.0 | 50 | 0.5404 | 0.5163 | | 0.5045 | 2.0 | 100 | 0.4347 | 0.6498 | | 0.371 | 3.0 | 150 | 0.3926 | 0.6991 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
dccuchile/bert-base-spanish-wwm-cased-finetuned-pawsx
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 38.40 +/- 30.62 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
dccuchile/bert-base-spanish-wwm-cased-finetuned-pos
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
dccuchile/distilbert-base-spanish-uncased
[ "pytorch", "distilbert", "fill-mask", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
670
null
# Model card for CoNN Add Carry ### Introduction In the paper Neural Comprehension: Language Models with Compiled Neural Networks, we introduced the integration of Compiled Neural Networks (CoNN) into the framework of language models, enabling existing language models to perform symbolic operations with perfect accuracy without the need for external tools. In this model card, we introduce the Add Carry model, which is similar to the Transformer model and can perform carry operations over a sequence of added numbers in parallel. ### Install ``` git clone https://github.com/WENGSYX/Neural-Comprehension cd Neural-Comprehension pip install . ``` To run neural comprehension, you need to install `PyTorch`, `Transformers`, `jax`, and `tracr`. ### How to Use? ``` from NeuralCom.CoNN.modeling_conn import CoNNModel from NeuralCom.CoNN import Tokenizer model = CoNNModel.from_pretrained('WENGSYX/CoNN_Add_Carry') tokenizer = Tokenizer(model.config.input_encoding_map, model.config.output_encoding_map, model.config.max_position_embeddings) output = model(tokenizer('2 15 3 8 10').unsqueeze(0)) print(tokenizer.decode(output.argmax(2))) >>> [['bos', '3', '5', '3', '9', '0']] ``` ### 🙏Cite🙏 ###### If you are interested in our paper, please feel free to cite it. ``` @misc{weng2023neural, title={Neural Comprehension: Language Models with Compiled Neural Networks}, author={Yixuan Weng and Minjun Zhu and Fei Xia and Bin Li and Shizhu He and Kang Liu and Jun Zhao}, year={2023}, eprint={2304.01665}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
Chaewon/mmnt_decoder_en
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg): `vocabtrimmer/mt5-small-squad-qg-trimmed-en-5000` This model is a trimmed version of [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qg | vocabtrimmer/mt5-small-squad-qg-trimmed-en-5000 | |:---------------------------|:--------------------------|:--------------------------------------------------| | parameter_size_full | 300,165,504 | 49,184,128 | | parameter_size_embedding | 256,103,424 | 5,122,048 | | vocab_size | 250,101 | 5,002 | | compression_rate_full | 100.0 | 16.39 | | compression_rate_embedding | 100.0 | 2.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 5000 | 2 |
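A hedged usage sketch for the trimmed checkpoint; the `generate question:` prefix and the `<hl>` answer-highlight tokens are assumptions carried over from the upstream lmqg question-generation format, not stated in this card:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "vocabtrimmer/mt5-small-squad-qg-trimmed-en-5000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# The answer span is wrapped in <hl> tokens (lmqg convention, assumed here).
text = "generate question: <hl> Beyonce <hl> further expanded her acting career during this period."
ids = tokenizer(text, return_tensors="pt")
out = model.generate(**ids, max_length=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```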
Chaewon/mnmt_decoder_en_gpt2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: balanced-augmented-roberta-large-gest-pred-seqeval-partialmatch-2 results: [] datasets: - Jsevisal/balanced_augmented_dataset_2 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # balanced-augmented-roberta-large-gest-pred-seqeval-partialmatch-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3718 - Precision: 0.9171 - Recall: 0.9183 - F1: 0.9144 - Accuracy: 0.9173 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 2.8924 | 1.0 | 52 | 2.1510 | 0.3032 | 0.2572 | 0.2464 | 0.4120 | | 1.8492 | 2.0 | 104 | 1.3548 | 0.5880 | 0.5350 | 0.5046 | 0.6440 | | 1.1692 | 3.0 | 156 | 0.8621 | 0.7443 | 0.6770 | 0.6775 | 0.7468 | | 0.7452 | 4.0 | 208 | 0.6391 | 0.7779 | 0.7919 | 0.7691 | 0.8159 | | 0.5185 | 5.0 | 260 | 0.6401 | 0.8197 | 0.8000 | 0.7868 | 0.8109 | | 0.3536 | 6.0 | 312 | 0.4251 | 0.8808 | 0.8684 | 0.8675 | 0.8820 | | 0.2411 | 7.0 | 364 | 0.4748 | 0.8709 | 0.8659 | 0.8613 | 0.8800 | | 0.1762 | 8.0 | 416 | 0.3809 | 0.8991 | 0.8721 | 0.8812 | 0.8971 | | 0.1377 | 9.0 | 468 | 0.3977 | 0.9062 | 0.8950 | 0.8953 | 0.9022 | | 0.1026 | 10.0 | 520 | 0.4637 | 0.9068 | 0.8887 | 0.8897 | 0.8951 | | 0.0763 | 11.0 | 572 | 0.4210 | 0.9079 | 0.9066 | 0.9020 | 0.9012 | | 0.0554 | 12.0 | 624 | 0.4950 | 0.8962 | 0.8837 | 0.8809 | 0.8850 | | 0.0419 | 13.0 | 676 | 0.4643 | 0.9043 | 0.9007 | 0.8969 | 0.8961 | | 0.0358 | 14.0 | 728 | 0.3718 | 0.9171 | 0.9183 | 0.9144 | 0.9173 | | 0.0264 | 15.0 | 780 | 0.4456 | 0.9349 | 0.9042 | 0.9132 | 0.9123 | | 0.0178 | 16.0 | 832 | 0.4296 | 0.9362 | 0.9169 | 0.9227 | 0.9193 | | 0.015 | 17.0 | 884 | 0.3900 | 0.9302 | 0.9235 | 0.9240 | 0.9254 | | 0.0105 | 18.0 | 936 | 0.4335 | 0.9284 | 0.9161 | 0.9181 | 0.9168 | | 0.0116 | 19.0 | 988 | 0.4426 | 0.9285 | 0.9138 | 0.9166 | 0.9138 | | 0.0104 | 20.0 | 1040 | 0.4410 | 0.9276 | 0.9141 | 0.9163 | 0.9138 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
ChaitanyaU/FineTuneLM
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on how to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: Shivraj8615/ppo-SnowballTarget 3. Step 2: Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
Champion/test_upload_vox2_wavlm_epoch8
[ "sidekit", "audio" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 --- https://wandb.ai/open-assistant/supervised-finetuning/runs/7pz5n33h exported checkpoint: 3000 steps datasets: ``` oasst_export_eu: datasets: - oasst_export: lang: "en,es,de,fr" input_file_path: 2023-03-27_oasst_research_ready_synth.jsonl.gz - alpaca - oig_file: source_url: https://huggingface.co/datasets/laion/OIG/resolve/main/unified_chip2.jsonl max_count: 15000 min_length: 500 val_split: 0.2 - oig_file: source_url: https://huggingface.co/datasets/laion/OIG/raw/main/unified_grade_school_math_instructions.jsonl val_split: 0.1 min_length: 1000 sort_by_length: false use_custom_sampler: false ``` pythia: ``` pythia-12b: fp16: true log_dir: "pythia_log_12b" learning_rate: 6e-6 model_name: EleutherAI/pythia-12b-deduped output_dir: pythia_model_12b weight_decay: 0.0 residual_dropout: 0.2 max_length: 2048 use_flash_attention: true warmup_steps: 100 gradient_checkpointing: false gradient_accumulation_steps: 4 per_device_train_batch_size: 2 per_device_eval_batch_size: 5 eval_steps: 200 save_steps: 500 num_train_epochs: 16 save_total_limit: 4 ```
Chan/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: unknown datasets: - irds/codec_history - nbalepur/cs_history_wiki_web_noise language: - de - en - fr - es - it - ru - la - nb metrics: - accuracy library_name: open_clip pipeline_tag: reinforcement-learning tags: - art ---
Chandanbhat/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - summarization language: - en widget: - text: "I love AutoTrain 🤗" datasets: - Hinataaa/autotrain-data-text_summary_arp co2_eq_emissions: emissions: 4.2992847624934365 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 45146113307 - CO2 Emissions (in grams): 4.2993 ## Validation Metrics - Loss: 1.285 - Rouge1: 49.529 - Rouge2: 25.404 - RougeL: 46.465 - RougeLsum: 46.645 - Gen Len: 18.803 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/Hinataaa/autotrain-text_summary_arp-45146113307 ```
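A Python equivalent of the cURL call in this card, using the same endpoint and payload:

```python
import requests

API_URL = "https://api-inference.huggingface.co/Hinataaa/autotrain-text_summary_arp-45146113307"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}
response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```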
Chertilasus/main
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image - not-for-all-eyes --- # Core ML Converted Model: - This model was converted to [Core ML for use on Apple Silicon devices](https://github.com/apple/ml-stable-diffusion). Conversion instructions can be found [here](https://github.com/godly-devotion/MochiDiffusion/wiki/How-to-convert-ckpt-or-safetensors-files-to-Core-ML).<br> - Provide the model to an app such as **Mochi Diffusion** [Github](https://github.com/godly-devotion/MochiDiffusion) / [Discord](https://discord.gg/x2kartzxGv) to generate images.<br> - `split_einsum` version is compatible with all compute unit options including Neural Engine. - `original` version is only compatible with `CPU & GPU` option. - Custom resolution versions are tagged accordingly. - The `vae-ft-mse-840000-ema-pruned.ckpt` VAE is embedded into the model. - This model was converted with a `vae-encoder` for use with `image2image`. - This model is `fp16`. - Descriptions are posted as-is from original model source. - Not all features and/or results may be available in `CoreML` format. - This model does not have the [unet split into chunks](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). - This model does not include a `safety checker` (for NSFW content). # realisticVision-v20: Source(s): [Hugging Face](https://huggingface.co/SG161222/Realistic_Vision_V2.0) - [CivitAI](https://civitai.com/models/4201/realistic-vision-v20) **Please read this!** My model has always been free and always will be free. There are no restrictions on the use of the model. The rights to this model still belong to me. This model is available on Mage.Space, Sinkin.ai, GetImg.ai and RandomSeed.co (NSFW content) You can find out news about this model and future models, as well as support me on Boosty. Recommended for use with [VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse-original) which has already been baked into the converted `CoreML` model version here. 
I use this template to get good generation results: **Prompt**: RAW photo, subject, (high detailed skin:1.2), 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 **Example**: RAW photo, a close up portrait photo of 26 y.o woman in wastelander clothes, long haircut, pale skin, slim body, background is city ruins, (high detailed skin:1.2), 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 **Negative Prompt**: (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck OR (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation `Euler A` or `DPM++ 2M Karras` with 25 steps `CFG Scale` 7 `Hires Fix` with `Latent` upscaler 0 `Hires Steps` and `Denoising Strength` 0.25 - 0.45 `Upscaling` by 1.1 - 2.0 <br><br> ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/393713d6-c943-4c6a-7247-ad5f03583200/width=400/333323) ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/cf4b9664-975a-4f56-8fba-afe4b5827a00/width=400/334107) ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/6777a3bb-3215-4250-22d8-556b06676c00/width=400/334752) ![image](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/39475d32-266e-4775-0eff-76e7e88b3200/width=400/360392)
Chuah/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: my_awesome_wnut_model results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 config: wnut_17 split: test args: wnut_17 metrics: - name: Precision type: precision value: 0.56 - name: Recall type: recall value: 0.28544949026876737 - name: F1 type: f1 value: 0.378146101903008 - name: Accuracy type: accuracy value: 0.9407464409388226 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_wnut_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2754 - Precision: 0.56 - Recall: 0.2854 - F1: 0.3781 - Accuracy: 0.9407 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 213 | 0.2826 | 0.5246 | 0.2475 | 0.3363 | 0.9384 | | No log | 2.0 | 426 | 0.2754 | 0.56 | 0.2854 | 0.3781 | 0.9407 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
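This card omits a usage example; a hedged sketch with the token-classification pipeline, where the model name is the card's local output directory (an assumption; substitute the Hub id if the checkpoint was pushed):

```python
from transformers import pipeline

ner = pipeline("token-classification", model="my_awesome_wnut_model", aggregation_strategy="simple")
print(ner("My name is Sarah and I live in London"))
```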