Dataset schema (column, type, observed min/max):

| Column | Type | Min | Max |
|---|---|---|---|
| modelId | string (length) | 4 | 111 |
| lastModified | string (length) | 24 | 24 |
| tags | list | | |
| pipeline_tag | string (length) | 5 | 30 |
| author | string (length) | 2 | 34 |
| config | null | | |
| securityStatus | null | | |
| id | string (length) | 4 | 111 |
| likes | int64 | 0 | 9.53k |
| downloads | int64 | 2 | 73.6M |
| library_name | string (length) | 2 | 84 |
| created | timestamp[us] | | |
| card | string (length) | 101 | 901k |
| card_len | int64 | 101 | 901k |
| embeddings | list | | |
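Rows with this schema can be pulled down and inspected with the `datasets` library. A minimal sketch; the repo id below is a placeholder, since the dump itself does not name its source dataset:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset this preview was exported from.
ds = load_dataset("your-org/model-cards-snapshot", split="train")

# Each row mirrors the schema above: Hub metadata plus the raw model-card
# text ("card"), its length ("card_len"), and a precomputed embedding of it.
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
print(row["card"][:200])          # first 200 characters of the model card
print(len(row["embeddings"][0]))  # dimensionality of the card embedding
```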
modelId: gouse-73/ppo-LunarLander-v2
lastModified: 2023-07-17T12:04:08.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: gouse-73
config: null
securityStatus: null
id: gouse-73/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T12:03:50
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.49 +/- 14.09 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
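Every stable-baselines3 card in this dump leaves its usage section as `TODO: Add your code`. A minimal sketch of the intended pattern, using the first record as the example; the checkpoint filename is an assumption (sb3 repos conventionally store it as `<model-name>.zip`):

```python
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Assumed filename -- not stated in the card itself.
checkpoint = load_from_hub(
    repo_id="gouse-73/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

# Roll the policy out in the environment it was trained on.
env = gym.make("LunarLander-v2")
obs, _ = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, _ = env.step(action)
    if terminated or truncated:
        obs, _ = env.reset()
```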
modelId: Oslaw/ppo-LunarLander-v2
lastModified: 2023-07-17T12:21:48.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: Oslaw
config: null
securityStatus: null
id: Oslaw/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T12:21:26
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 260.52 +/- 15.66 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: karacam/ppo-LunarLander-v2
lastModified: 2023-07-17T13:56:13.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: karacam
config: null
securityStatus: null
id: karacam/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T13:55:53
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 247.06 +/- 17.11 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00021982192993164062, -0.0271453857421875, 0.0170745849609375, 0.0233306884765625, -0.006072998046875, 0.002765655517578125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.034332275390625...
modelId: jeremyleejh/ppo-LunarLander-v2
lastModified: 2023-07-21T08:21:43.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: jeremyleejh
config: null
securityStatus: null
id: jeremyleejh/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T14:08:31
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 112.42 +/- 87.78 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: hafidikhsan/wav2vec2-large-xlsr-53-english-pronunciation-evaluation-bs-v4
lastModified: 2023-07-17T14:56:20.000Z
tags: [ "transformers", "pytorch", "wav2vec2", "audio-classification", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
pipeline_tag: audio-classification
author: hafidikhsan
config: null
securityStatus: null
id: hafidikhsan/wav2vec2-large-xlsr-53-english-pronunciation-evaluation-bs-v4
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T14:53:50
card:
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: wav2vec2-large-xlsr-53-english-pronunciation-evaluation-bs-v4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53-english-pronunciation-evaluation-bs-v4 This model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-english](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.3181 - Accuracy: 0.79 - F1: 0.7920 - Precision: 0.7954 - Recall: 0.79 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:| | 1.14 | 1.0 | 2000 | 0.9878 | 0.604 | 0.5956 | 0.6041 | 0.604 | | 1.3551 | 2.0 | 4000 | 1.0238 | 0.636 | 0.6261 | 0.6489 | 0.636 | | 0.7984 | 3.0 | 6000 | 1.0629 | 0.748 | 0.7475 | 0.7494 | 0.748 | | 0.6879 | 4.0 | 8000 | 1.2007 | 0.772 | 0.7733 | 0.7750 | 0.772 | | 0.0593 | 5.0 | 10000 | 1.2298 | 0.796 | 0.7979 | 0.8011 | 0.796 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
card_len: 2,039
embeddings:
[ [ -0.027984619140625, -0.0256805419921875, 0.004581451416015625, 0.00726318359375, -0.0174407958984375, -0.02203369140625, -0.02239990234375, -0.0273895263671875, 0.00690460205078125, 0.0207672119140625, -0.050628662109375, -0.04339599609375, -0.048980712890625, ...
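The card above documents training but not inference. A minimal inference sketch for an audio classifier like this one, assuming standard `transformers` pipeline usage; the wav path is a placeholder:

```python
from transformers import pipeline

# Checkpoint id from the record above; the audio file path is a placeholder.
classifier = pipeline(
    "audio-classification",
    model="hafidikhsan/wav2vec2-large-xlsr-53-english-pronunciation-evaluation-bs-v4",
)
print(classifier("sample_utterance.wav"))  # [{"label": ..., "score": ...}, ...]
```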
modelId: peterdamn/speecht5_finetuned_voxpopuli_nl
lastModified: 2023-07-17T15:47:39.000Z
tags: [ "transformers", "pytorch", "tensorboard", "speecht5", "text-to-audio", "text-to-speech", "generated_from_trainer", "dataset:facebook/voxpopuli", "license:mit", "endpoints_compatible", "region:us" ]
pipeline_tag: text-to-speech
author: peterdamn
config: null
securityStatus: null
id: peterdamn/speecht5_finetuned_voxpopuli_nl
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T15:04:46
card:
--- license: mit tags: - text-to-speech - generated_from_trainer datasets: - facebook/voxpopuli model-index: - name: speecht5_finetuned_voxpopuli_nl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # speecht5_finetuned_voxpopuli_nl This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the facebook/voxpopuli dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5 - training_steps: 10 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1 - Datasets 2.12.0 - Tokenizers 0.13.2
card_len: 1,251
embeddings:
[ [ -0.02935791015625, -0.0584716796875, -0.0003197193145751953, 0.01232147216796875, -0.0247039794921875, -0.01837158203125, -0.0215606689453125, -0.0233306884765625, 0.0078277587890625, 0.0183868408203125, -0.048858642578125, -0.049346923828125, -0.039825439453125...
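This SpeechT5 fine-tune also ships without a usage snippet. A sketch following the standard SpeechT5 recipe (a recent `transformers` version assumed): the model conditions on an x-vector speaker embedding, here taken from the public dataset used in the Hugging Face examples.

```python
import torch
from datasets import load_dataset
from transformers import pipeline

synthesiser = pipeline("text-to-speech", model="peterdamn/speecht5_finetuned_voxpopuli_nl")

# SpeechT5 needs a speaker embedding; this x-vector dataset is the one the
# official examples use.
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

speech = synthesiser(
    "Hallo, dit is een test.",  # the model was fine-tuned on Dutch VoxPopuli
    forward_params={"speaker_embeddings": speaker_embedding},
)
# speech["audio"] is a waveform array sampled at speech["sampling_rate"] Hz.
```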
modelId: nthngdy/headless-pythia-owt2-70m-raw
lastModified: 2023-09-20T13:54:34.000Z
tags: [ "transformers", "pytorch", "gpt_neox", "text-generation", "en", "dataset:the_pile_openwebtext2", "arxiv:2309.08351", "license:mit", "endpoints_compatible", "text-generation-inference", "region:us" ]
pipeline_tag: text-generation
author: nthngdy
config: null
securityStatus: null
id: nthngdy/headless-pythia-owt2-70m-raw
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T15:19:36
card:
--- license: mit datasets: - the_pile_openwebtext2 language: - en pipeline_tag: text-generation --- ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** TBD - **Paper:** https://arxiv.org/abs/2309.08351 ### Model Architecture and Objective This model is a Pythia-70m architecture trained on OpenWebText-2 using the Contrastive Weight Tying objective. #### Software [More Information Needed] ## Citation **BibTeX:** ```bibtex @misc{godey2023headless, title={Headless Language Models: Learning without Predicting with Contrastive Weight Tying}, author={Nathan Godey and Éric de la Clergerie and Benoît Sagot}, year={2023}, eprint={2309.08351}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## Model Card Authors Nathan Godey Eric de la Clergerie Benoît Sagot ## Model Card Contact nathan.godey@inria.fr
card_len: 889
embeddings:
[ [ -0.0029964447021484375, -0.0357666015625, 0.022674560546875, -0.0047607421875, -0.02020263671875, -0.0269775390625, -0.013671875, -0.0075531005859375, 0.0037841796875, 0.035430908203125, -0.037139892578125, -0.041046142578125, -0.038970947265625, -0.03123474...
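Since the Contrastive Weight Tying objective trains representations rather than a calibrated next-token head, a safe way to use this raw checkpoint is as a feature extractor; a sketch assuming the repo loads with stock `transformers` classes (the tags suggest a standard `gpt_neox` architecture):

```python
import torch
from transformers import AutoModel, AutoTokenizer

tok = AutoTokenizer.from_pretrained("nthngdy/headless-pythia-owt2-70m-raw")
model = AutoModel.from_pretrained("nthngdy/headless-pythia-owt2-70m-raw")

# Extract contextual token representations rather than generating text.
with torch.no_grad():
    hidden = model(**tok("Hello world", return_tensors="pt")).last_hidden_state
print(hidden.shape)  # (1, seq_len, hidden_size)
```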
modelId: mj-718/ppo-LunarLander-v2
lastModified: 2023-07-17T17:36:48.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: mj-718
config: null
securityStatus: null
id: mj-718/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T17:36:26
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 251.24 +/- 16.62 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00021982192993164062, -0.0271453857421875, 0.0170745849609375, 0.0233306884765625, -0.006072998046875, 0.002765655517578125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.034332275390625...
modelId: pcapp/ppo-Pyramids-Training
lastModified: 2023-07-17T17:56:05.000Z
tags: [ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
pipeline_tag: reinforcement-learning
author: pcapp
config: null
securityStatus: null
id: pcapp/ppo-Pyramids-Training
likes: 0
downloads: 2
library_name: ml-agents
created: 2023-07-17T17:56:01
card:
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how works ML-Agents: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser** 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity 2. Step 1: Find your model_id: pcapp/ppo-Pyramids-Training 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
card_len: 1,339
embeddings:
[ [ -0.04034423828125, -0.034027099609375, 0.0008182525634765625, 0.01525115966796875, -0.01074981689453125, 0.01294708251953125, 0.0166778564453125, -0.01552581787109375, 0.0333251953125, 0.0283660888671875, -0.04010009765625, -0.048675537109375, -0.028961181640625...
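The ML-Agents cards point to the browser viewer for playback; to fetch the trained policy directly, the ONNX file can be downloaded from the Hub. The filename below is an assumption (ML-Agents pushes the policy as `<Behavior>.onnx`, typically `Pyramids.onnx` for this environment):

```python
from huggingface_hub import hf_hub_download

# Assumed filename -- check the repo's file listing if it differs.
onnx_path = hf_hub_download(
    repo_id="pcapp/ppo-Pyramids-Training",
    filename="Pyramids.onnx",
)
print(onnx_path)  # local cache path to the ONNX policy
```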
modelId: dariowsz/ppo-LunarLander-v2
lastModified: 2023-07-17T18:03:08.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: dariowsz
config: null
securityStatus: null
id: dariowsz/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T18:02:47
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 243.32 +/- 37.66 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00021219253540039062, -0.027099609375, 0.0170745849609375, 0.023345947265625, -0.0060577392578125, 0.0027484893798828125, 0.034423828125, -0.01212310791015625, 0.0198822021484375, 0.06500244140625, -0.04315185546875, -0.035247802734375, -0.0343017578125, ...
modelId: xoyeop/distilbert-base-uncased-DIALOCONAN-CLS
lastModified: 2023-07-17T21:25:54.000Z
tags: [ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
pipeline_tag: text-classification
author: xoyeop
config: null
securityStatus: null
id: xoyeop/distilbert-base-uncased-DIALOCONAN-CLS
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T18:19:56
card:
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-DIALOCONAN-CLS results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-DIALOCONAN-CLS This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3391 - Precision: 0.7050 - Recall: 0.7076 - F1: 0.7062 - Accuracy: 0.9404 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.3266 | 1.0 | 2500 | 0.3531 | 0.6899 | 0.6891 | 0.6884 | 0.9162 | | 0.1862 | 2.0 | 5000 | 0.3141 | 0.7056 | 0.7078 | 0.7065 | 0.9407 | | 0.0775 | 3.0 | 7500 | 0.3391 | 0.7050 | 0.7076 | 0.7062 | 0.9404 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
card_len: 1,730
embeddings:
[ [ -0.0307159423828125, -0.038909912109375, 0.01068878173828125, 0.01325225830078125, -0.0275115966796875, -0.0170440673828125, -0.007472991943359375, -0.003459930419921875, 0.009490966796875, 0.0246124267578125, -0.0443115234375, -0.04302978515625, -0.057312011718...
modelId: chandan9t8/ppo-Pyramid
lastModified: 2023-07-17T18:31:27.000Z
tags: [ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
pipeline_tag: reinforcement-learning
author: chandan9t8
config: null
securityStatus: null
id: chandan9t8/ppo-Pyramid
likes: 0
downloads: 2
library_name: ml-agents
created: 2023-07-17T18:31:24
card:
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how works ML-Agents: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser** 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity 2. Step 1: Find your model_id: chandan9t8/ppo-Pyramid 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
card_len: 1,334
embeddings:
[ [ -0.040069580078125, -0.03375244140625, 0.0022449493408203125, 0.01421356201171875, -0.01114654541015625, 0.01216888427734375, 0.0169219970703125, -0.0148162841796875, 0.032928466796875, 0.0307769775390625, -0.04058837890625, -0.049591064453125, -0.030029296875, ...
modelId: monideep2255/spell_correction_M05_LM
lastModified: 2023-07-17T19:15:32.000Z
tags: [ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
pipeline_tag: text2text-generation
author: monideep2255
config: null
securityStatus: null
id: monideep2255/spell_correction_M05_LM
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T18:31:35
card:
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: spell_correction_M05_LM results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # spell_correction_M05_LM This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0281 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 274 | 0.2890 | | 1.8446 | 2.0 | 548 | 0.0540 | | 1.8446 | 3.0 | 822 | 0.0403 | | 0.028 | 4.0 | 1096 | 0.0344 | | 0.028 | 5.0 | 1370 | 0.0289 | | 0.0137 | 6.0 | 1644 | 0.0289 | | 0.0137 | 7.0 | 1918 | 0.0283 | | 0.0063 | 8.0 | 2192 | 0.0266 | | 0.0063 | 9.0 | 2466 | 0.0271 | | 0.0043 | 10.0 | 2740 | 0.0272 | | 0.0033 | 11.0 | 3014 | 0.0281 | | 0.0033 | 12.0 | 3288 | 0.0264 | | 0.003 | 13.0 | 3562 | 0.0277 | | 0.003 | 14.0 | 3836 | 0.0274 | | 0.003 | 15.0 | 4110 | 0.0265 | | 0.003 | 16.0 | 4384 | 0.0290 | | 0.0024 | 17.0 | 4658 | 0.0276 | | 0.0024 | 18.0 | 4932 | 0.0270 | | 0.0025 | 19.0 | 5206 | 0.0276 | | 0.0025 | 20.0 | 5480 | 0.0272 | | 0.0016 | 21.0 | 5754 | 0.0271 | | 0.0018 | 22.0 | 6028 | 0.0272 | | 0.0018 | 23.0 | 6302 | 0.0282 | | 0.0014 | 24.0 | 6576 | 0.0276 | | 0.0014 | 25.0 | 6850 | 0.0283 | | 0.0014 | 26.0 | 7124 | 0.0280 | | 0.0014 | 27.0 | 7398 | 0.0279 | | 0.0013 | 28.0 | 7672 | 0.0280 | | 0.0013 | 29.0 | 7946 | 0.0282 | | 0.0014 | 30.0 | 8220 | 0.0281 | ### Framework versions - Transformers 4.28.0 - Pytorch 1.12.1+cu102 - Datasets 2.13.1 - Tokenizers 0.13.3
card_len: 2,749
embeddings:
[ [ -0.036773681640625, -0.0494384765625, 0.011688232421875, 0.007568359375, -0.0081939697265625, -0.0108489990234375, -0.0010547637939453125, -0.00778961181640625, 0.039306640625, 0.0304107666015625, -0.055419921875, -0.05621337890625, -0.044891357421875, -0.01...
modelId: bwilkie/ppo-LunarLander-v2
lastModified: 2023-07-17T19:36:52.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: bwilkie
config: null
securityStatus: null
id: bwilkie/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T19:36:32
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.67 +/- 19.74 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00020968914031982422, -0.0271453857421875, 0.0170745849609375, 0.0233612060546875, -0.006031036376953125, 0.002758026123046875, 0.034454345703125, -0.0121307373046875, 0.0199127197265625, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578...
modelId: tgsc/debertina-base
lastModified: 2023-10-18T21:43:08.000Z
tags: [ "transformers", "pytorch", "deberta-v2", "deberta", "deberta-v3", "pt", "pt-br", "dataset:allenai/c4", "arxiv:2111.09543", "arxiv:2003.10555", "arxiv:2006.03654", "license:mit", "endpoints_compatible", "region:us" ]
pipeline_tag: null
author: tgsc
config: null
securityStatus: null
id: tgsc/debertina-base
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-17T20:16:58
card:
--- language: pt tags: - deberta - deberta-v3 - pt - pt-br datasets: - allenai/c4 library_name: transformers license: mit --- # DeBERTina <p align="center"> <img src="https://huggingface.co/tgsc/debertina-base/resolve/main/DeBERTina.png" alt="DeBERTina"/> </p> DeBERTina é um modelo [DeBERTa-v3](https://arxiv.org/abs/2111.09543) em português treinado no estilo [ELECTRA](https://arxiv.org/abs/2003.10555), com RTD (Replaced Token Detection) e *gradient-disentangled embedding sharing* (GDES). *DeBERTina is a portuguese [DeBERTa-v3](https://arxiv.org/abs/2111.09543) model trained electra-style [ELECTRA](https://arxiv.org/abs/2003.10555) (with Replaced Token Detection - RTD) and gradient-disentangled embedding sharing (GDES).* | Model | type | Vocabulary | Backbone + Embeddings = Total Parameters | | :-: | :-: | :-: | :-: | | [ult5-pt-small](https://huggingface.co/tgsc/ult5-pt-small) | encoder-decoder | 65k | 56.6M + 25.8M = 82.4M | | [sentence-transformer-ult5-pt-small](https://huggingface.co/tgsc/sentence-transformer-ult5-pt-small) | sentence-transformer | 65k | 25.2 + 25.8M = 51M | | [DeBERTina-base](https://huggingface.co/tgsc/debertina-base) | encoder | 32k | 85.5M + 24.6M = 110.0M | | [DeBERTina-base-128k-vocab](https://huggingface.co/tgsc/debertina-base-128k-vocab) | encoder | 128k | 85.5M + 98.3M = 183.8M | | [DeBERTina-large](https://huggingface.co/tgsc/debertina-large) | encoder | 128k | 348.4M + 98.3M = 433.9.0M | | [DeBERTina-xsmall](https://huggingface.co/tgsc/debertina-xsmall) | encoder | 128k | 21.5M + 49.2M = 70.6M | - **Developed by:** Thacio Garcia Scandaroli - **Model type:** DeBERTa-v3 - **Language(s) (NLP):** Português - **License:** MIT Benchmarks e tutorial de fine-tune: [https://github.com/thacio/LLM-Notebooks](https://github.com/thacio/LLM-Notebooks) *Benchmarks e fine-tune notebook*: [https://github.com/thacio/LLM-Notebooks](https://github.com/thacio/LLM-Notebooks) Special tokens: '[PAD]', '[CLS]', '[SEP]', '[UNK]' ## Treino O modelo foi treinado com o corpus C4 em português, utilizando um tokenizer sentencepiece com vocabulário de tamanho 128k. O treino consiste em um gerador e um discriminador. O gerador é treinado com *masked language modeling* em 15% dos tokens. Em seguida, tokens são substituídos pelas predições do gerador, e o discriminador é treinado de forma a identificar quais tokens são originais e quais foram substítudos. *The model was trained with the C4 corpus in portuguese with a sentencepiece tokenizer with a vocabulary of 128.* *The training is done with a generator and a discriminator. The generator is trained with maskeed language modeling as BERT, but without next sentence prediction, by masking 15% of the tokens.* *The masked tokens are then replaced by the generators prediction, and the discriminator is trained with the objective of identifying the which are the original and replaced tokens.* ## Fine-tunning O fine-tunning é feito com o discriminador. 
Para carregar o modelo para classificações: *Fine-tunning should be done with the discrimnator.* *Loading the model for classification:* ```python from transformers import AutoModelForSequenceClassification num_labels = 2 # number of labels in classes model = AutoModelForSequenceClassification.from_pretrained("tgsc/debertina-base",num_labels=num_labels) ``` ## Citation ``` latex @inproceedings{ 2023debertina, title={DeBERTina: A portuguese DeBERTa-v3 model.}, author = {Thacio Garcia Scandaroli}, year={2023}, url={https://huggingface.co/tgsc/debertina-base} } ``` --- ## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and enhanced mask decoder. With those two improvements, DeBERTa out perform RoBERTa on a majority of NLU tasks with 80GB training data. In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves the model performance on downstream tasks. You can find more technique details about the new model from our [paper](https://arxiv.org/abs/2111.09543). Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates. The DeBERTa V3 base model comes with 12 layers and a hidden size of 768. It has only 86M backbone parameters with a vocabulary containing 128K tokens which introduces 98M parameters in the Embedding layer. This model was trained using the 160GB data as DeBERTa V2. #### Fine-tuning on NLU tasks We present the dev results on SQuAD 2.0 and MNLI tasks. | Model |Vocabulary(K)|Backbone #Params(M)| SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC)| |-------------------|----------|-------------------|-----------|----------| | RoBERTa-base |50 |86 | 83.7/80.5 | 87.6/- | | XLNet-base |32 |92 | -/80.2 | 86.8/- | | ELECTRA-base |30 |86 | -/80.5 | 88.8/ | | DeBERTa-base |50 |100 | 86.2/83.1| 88.8/88.5| | DeBERTa-v3-base |128|86 | **88.4/85.4** | **90.6/90.7**| | DeBERTa-v3-base + SiFT |128|86 | -/- | 91.0/-| We present the dev results on SQuAD 1.1/2.0 and MNLI tasks. 
#### Fine-tuning with HF transformers ```bash #!/bin/bash cd transformers/examples/pytorch/text-classification/ pip install datasets export TASK_NAME=mnli output_dir="ds_results" num_gpus=8 batch_size=8 python -m torch.distributed.launch --nproc_per_node=${num_gpus} \ run_glue.py \ --model_name_or_path microsoft/deberta-v3-base \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --evaluation_strategy steps \ --max_seq_length 256 \ --warmup_steps 500 \ --per_device_train_batch_size ${batch_size} \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir $output_dir \ --overwrite_output_dir \ --logging_steps 1000 \ --logging_dir $output_dir ``` ### Citation If you find DeBERTa useful for your work, please cite the following papers: ``` latex @misc{he2021debertav3, title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing}, author={Pengcheng He and Jianfeng Gao and Weizhu Chen}, year={2021}, eprint={2111.09543}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ``` latex @inproceedings{ he2021deberta, title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION}, author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen}, booktitle={International Conference on Learning Representations}, year={2021}, url={https://openreview.net/forum?id=XPZIaotutsD} } ```
card_len: 6,964
embeddings:
[ [ -0.023651123046875, -0.05120849609375, 0.01259613037109375, 0.02642822265625, -0.03070068359375, 0.01235198974609375, -0.00740814208984375, -0.035858154296875, 0.034332275390625, 0.01288604736328125, -0.0252227783203125, -0.0496826171875, -0.06622314453125, ...
modelId: tijstijs/ppo-LunarLander-v2
lastModified: 2023-07-17T20:23:56.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: tijstijs
config: null
securityStatus: null
id: tijstijs/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T20:23:34
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 256.64 +/- 23.67 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00020968914031982422, -0.0271453857421875, 0.0170745849609375, 0.0233612060546875, -0.006031036376953125, 0.002758026123046875, 0.034454345703125, -0.0121307373046875, 0.0199127197265625, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578...
modelId: ozzzzz/ppo-LunarLander-v2
lastModified: 2023-07-17T21:32:37.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: ozzzzz
config: null
securityStatus: null
id: ozzzzz/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T21:32:14
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 258.43 +/- 25.36 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: jwb220/PPO-LunarLander-v2
lastModified: 2023-07-17T22:31:20.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: jwb220
config: null
securityStatus: null
id: jwb220/PPO-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-17T22:31:01
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 253.16 +/- 29.76 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00020968914031982422, -0.0271453857421875, 0.0170745849609375, 0.0233612060546875, -0.006031036376953125, 0.002758026123046875, 0.034454345703125, -0.0121307373046875, 0.0199127197265625, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578...
modelId: kingducks/ppo-LunarLander-v2
lastModified: 2023-07-18T00:05:27.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: kingducks
config: null
securityStatus: null
id: kingducks/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T00:04:53
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: MLP PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 273.94 +/- 16.90 name: mean_reward verified: false --- # **MLP PPO** Agent playing **LunarLander-v2** This is a trained model of a **MLP PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 796
embeddings:
[ [ -0.0030994415283203125, -0.02569580078125, 0.0142974853515625, 0.0287933349609375, 0.0040435791015625, -0.0015954971313476562, 0.03143310546875, -0.013885498046875, 0.0198516845703125, 0.067626953125, -0.045989990234375, -0.033966064453125, -0.037628173828125, ...
modelId: vphu123/whisper_totaldataa2
lastModified: 2023-09-24T05:59:44.000Z
tags: [ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "endpoints_compatible", "region:us" ]
pipeline_tag: automatic-speech-recognition
author: vphu123
config: null
securityStatus: null
id: vphu123/whisper_totaldataa2
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-18T03:18:29
card:
--- tags: - whisper-event - generated_from_trainer model-index: - name: whisper-base-lastversion results: [] metrics: - wer --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-base-lastversion This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1732 ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.000025 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - distributed_type: gpu - gradient_accumulation_steps: 2 - total_train_batch_size: 256 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5000 - training_steps: 80000 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.3116 | 1 | 5000 | 0.6231 | | 0.2104 | 3 | 10000 | 0.4287 | | 0.1729 | 4 | 15000 | 0.3421 | | 0.1472 | 6 | 20000 | 0.3211 | | 0.128 | 7 | 25000 | 0.2811 | | 0.1065 | 9 | 30000 | 0.2649 | | 0.0995 | 10 | 35000 | 0.2523 | | 0.0812 | 12 | 40000 | 0.2401 | | 0.066 | 14 | 45000 | 0.2311 | | 0.0574 | 15 | 50000 | 0.2132 | | 0.0463 | 17 | 55000 | 0.2077 | | 0.04 | 18 | 60000 | 0.1957 | | 0.0314 | 19 | 65000 | 0.1813 | | 0.0305 | 20 | 70000 | 0.1802 | | 0.0298 | 21 | 75000 | 0.1755 | | 0.0265 | 22 | 80000 | 0.1732 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.1.0a0+gitcc01568 - Datasets 2.13.1 - Tokenizers 0.13.3
card_len: 2,002
embeddings:
[ [ -0.0291595458984375, -0.034759521484375, 0.0027008056640625, 0.00873565673828125, -0.017791748046875, -0.0304107666015625, -0.006317138671875, -0.018951416015625, 0.019561767578125, 0.0288238525390625, -0.05657958984375, -0.052032470703125, -0.04632568359375, ...
modelId: SeDm/ppo-LunarLander-v2
lastModified: 2023-07-18T07:07:54.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: SeDm
config: null
securityStatus: null
id: SeDm/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T07:07:32
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 258.41 +/- 20.79 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: SeDm/ppo-LunarLander-v2-gamma03
lastModified: 2023-07-18T08:02:29.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: SeDm
config: null
securityStatus: null
id: SeDm/ppo-LunarLander-v2-gamma03
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T08:02:08
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -88.68 +/- 154.18 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 785
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: 1daniar/ppo-LunarLander-v2
lastModified: 2023-07-18T12:18:30.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: 1daniar
config: null
securityStatus: null
id: 1daniar/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T08:28:28
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 263.04 +/- 18.55 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: sarahflan/distilbert-base-uncased-finetuned-as_sentences_fewshot
lastModified: 2023-07-18T08:42:08.000Z
tags: [ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
pipeline_tag: text-classification
author: sarahflan
config: null
securityStatus: null
id: sarahflan/distilbert-base-uncased-finetuned-as_sentences_fewshot
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-18T08:34:42
card:
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-as_sentences_fewshot results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-as_sentences_fewshot This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0227 - Accuracy: 0.9933 - F1: 0.9933 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.6953 | 1.0 | 11 | 0.6832 | 0.6267 | 0.5993 | | 0.6562 | 2.0 | 22 | 0.5071 | 0.9267 | 0.9268 | | 0.4346 | 3.0 | 33 | 0.1365 | 0.9933 | 0.9933 | | 0.1714 | 4.0 | 44 | 0.0566 | 0.9933 | 0.9933 | | 0.1125 | 5.0 | 55 | 0.0234 | 1.0 | 1.0 | | 0.0897 | 6.0 | 66 | 0.0264 | 0.9933 | 0.9933 | | 0.0487 | 7.0 | 77 | 0.0465 | 0.9867 | 0.9867 | | 0.0401 | 8.0 | 88 | 0.0082 | 1.0 | 1.0 | | 0.0364 | 9.0 | 99 | 0.0273 | 0.9933 | 0.9933 | | 0.0237 | 10.0 | 110 | 0.0163 | 0.9933 | 0.9933 | | 0.0209 | 11.0 | 121 | 0.0044 | 1.0 | 1.0 | | 0.0196 | 12.0 | 132 | 0.0056 | 1.0 | 1.0 | | 0.0198 | 13.0 | 143 | 0.0059 | 1.0 | 1.0 | | 0.0047 | 14.0 | 154 | 0.0063 | 1.0 | 1.0 | | 0.0157 | 15.0 | 165 | 0.0115 | 0.9933 | 0.9933 | | 0.0142 | 16.0 | 176 | 0.0116 | 0.9933 | 0.9933 | | 0.0035 | 17.0 | 187 | 0.0111 | 0.9933 | 0.9933 | | 0.0028 | 18.0 | 198 | 0.0114 | 0.9933 | 0.9933 | | 0.0023 | 19.0 | 209 | 0.0103 | 0.9933 | 0.9933 | | 0.0019 | 20.0 | 220 | 0.0102 | 0.9933 | 0.9933 | | 0.0016 | 21.0 | 231 | 0.0117 | 0.9933 | 0.9933 | | 0.0016 | 22.0 | 242 | 0.0103 | 0.9933 | 0.9933 | | 0.0014 | 23.0 | 253 | 0.0072 | 0.9933 | 0.9933 | | 0.0014 | 24.0 | 264 | 0.0059 | 0.9933 | 0.9933 | | 0.0013 | 25.0 | 275 | 0.0071 | 0.9933 | 0.9933 | | 0.0012 | 26.0 | 286 | 0.0079 | 0.9933 | 0.9933 | | 0.0012 | 27.0 | 297 | 0.0076 | 0.9933 | 0.9933 | | 0.0011 | 28.0 | 308 | 0.0076 | 0.9933 | 0.9933 | | 0.001 | 29.0 | 319 | 0.0085 | 0.9933 | 0.9933 | | 0.0009 | 30.0 | 330 | 0.0088 | 0.9933 | 0.9933 | | 0.001 | 31.0 | 341 | 0.0089 | 0.9933 | 0.9933 | | 0.0009 | 32.0 | 352 | 0.0092 | 0.9933 | 0.9933 | | 0.0009 | 33.0 | 363 | 0.0091 | 0.9933 | 0.9933 | | 0.0008 | 34.0 | 374 | 0.0100 | 0.9933 | 0.9933 | | 0.0021 | 35.0 | 385 | 0.0312 | 0.9933 | 0.9933 | | 0.0008 | 36.0 | 396 | 0.0340 | 0.9933 | 0.9933 | | 0.0009 | 37.0 | 407 | 0.0313 | 0.9933 | 0.9933 | | 0.0008 | 38.0 | 418 | 0.0278 | 0.9933 | 0.9933 | | 0.0008 | 39.0 | 429 | 0.0246 | 0.9933 | 0.9933 | | 0.0008 | 40.0 | 440 | 0.0226 | 0.9933 | 0.9933 | | 0.0007 | 41.0 | 451 | 0.0212 | 0.9933 | 0.9933 | | 0.0007 | 42.0 | 462 | 0.0200 | 0.9933 | 0.9933 | | 0.0007 | 43.0 | 473 | 0.0241 | 0.9933 | 0.9933 | | 0.0007 | 44.0 | 484 | 0.0249 | 0.9933 | 0.9933 | | 0.0007 | 45.0 | 495 | 0.0244 | 0.9933 | 0.9933 | | 0.0007 | 46.0 | 506 | 0.0238 | 0.9933 | 0.9933 | | 
0.0007 | 47.0 | 517 | 0.0234 | 0.9933 | 0.9933 | | 0.0006 | 48.0 | 528 | 0.0230 | 0.9933 | 0.9933 | | 0.0007 | 49.0 | 539 | 0.0227 | 0.9933 | 0.9933 | | 0.0007 | 50.0 | 550 | 0.0227 | 0.9933 | 0.9933 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
card_len: 4,939
embeddings:
[ [ -0.03619384765625, -0.0421142578125, 0.012786865234375, 0.005115509033203125, -0.00022554397583007812, 0.008148193359375, 0.0033016204833984375, 0.002262115478515625, 0.05352783203125, 0.0247802734375, -0.045501708984375, -0.04541015625, -0.048553466796875, ...
modelId: AndrewMay/ppo-LunarLander-v2
lastModified: 2023-07-18T08:44:18.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: AndrewMay
config: null
securityStatus: null
id: AndrewMay/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T08:43:59
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.35 +/- 22.45 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00020754337310791016, -0.027130126953125, 0.0170745849609375, 0.023345947265625, -0.006053924560546875, 0.0027618408203125, 0.034423828125, -0.01213836669921875, 0.0199127197265625, 0.06500244140625, -0.043182373046875, -0.03521728515625, -0.0343017578125, ...
modelId: sufyn/ppo-LunarLander-v2
lastModified: 2023-07-18T09:16:30.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: sufyn
config: null
securityStatus: null
id: sufyn/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T09:16:10
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 262.34 +/- 24.09 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00020754337310791016, -0.027130126953125, 0.0170745849609375, 0.023345947265625, -0.006053924560546875, 0.0027618408203125, 0.034423828125, -0.01213836669921875, 0.0199127197265625, 0.06500244140625, -0.043182373046875, -0.03521728515625, -0.0343017578125, ...
modelId: sherif1311/flan-t5-base-reviewb-text-classification
lastModified: 2023-07-18T11:11:59.000Z
tags: [ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
pipeline_tag: text2text-generation
author: sherif1311
config: null
securityStatus: null
id: sherif1311/flan-t5-base-reviewb-text-classification
likes: 0
downloads: 2
library_name: transformers
created: 2023-07-18T09:50:13
card:
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 model-index: - name: flan-t5-base-reviewb-text-classification results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-base-reviewb-text-classification This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.2081 - F1: 76.3399 - Gen Len: 2.3170 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 1.12.1+cu116 - Datasets 2.13.1 - Tokenizers 0.12.1
card_len: 1,198
embeddings:
[ [ -0.026092529296875, -0.031707763671875, 0.0093841552734375, 0.0015306472778320312, -0.0223388671875, -0.030853271484375, -0.01450347900390625, -0.02984619140625, 0.009796142578125, 0.023529052734375, -0.03802490234375, -0.046905517578125, -0.05364990234375, ...
modelId: mrmrob003/ppo-LunarLander-v2
lastModified: 2023-07-18T11:00:07.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: mrmrob003
config: null
securityStatus: null
id: mrmrob003/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T10:16:10
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 260.13 +/- 17.18 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: giuseppemassafra/ppo-LunarLander-v2
lastModified: 2023-07-18T10:16:37.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: giuseppemassafra
config: null
securityStatus: null
id: giuseppemassafra/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T10:16:17
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.78 +/- 10.40 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: MikeFisher/ppo-LunarLander-v2
lastModified: 2023-07-18T10:48:03.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: MikeFisher
config: null
securityStatus: null
id: MikeFisher/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T10:47:41
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 265.12 +/- 16.95 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: saharad/ppo-LunarLander-v2
lastModified: 2023-07-18T10:51:14.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: saharad
config: null
securityStatus: null
id: saharad/ppo-LunarLander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T10:50:55
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 270.68 +/- 16.37 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
card_len: 784
embeddings:
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
modelId: Mgollen/PPO-Lunarlander-v2
lastModified: 2023-07-18T12:28:38.000Z
tags: [ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
pipeline_tag: reinforcement-learning
author: Mgollen
config: null
securityStatus: null
id: Mgollen/PPO-Lunarlander-v2
likes: 0
downloads: 2
library_name: stable-baselines3
created: 2023-07-18T12:28:19
card:
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 259.89 +/- 19.91 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
crcdng/Pyramids
2023-07-18T12:43:49.000Z
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
crcdng
null
null
crcdng/Pyramids
0
2
ml-agents
2023-07-18T12:43:46
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial on training your first agent using ML-Agents and publishing it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the ML-Agents official environments, go to https://huggingface.co/unity 2. Find your model_id: crcdng/Pyramids 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
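For completeness next to the resume command above, this is roughly how a trained run gets pushed back to the Hub with the course tooling; `--run-id` and `--local-dir` are placeholders that depend on the local training config, not values taken from this card.

```bash
# Upload the trained agent to the Hub (run id and results dir are assumptions).
mlagents-push-to-hf \
  --run-id="PyramidsTraining" \
  --local-dir="./results/PyramidsTraining" \
  --repo-id="crcdng/Pyramids" \
  --commit-message="Trained Pyramids agent"
```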
1,327
[ [ -0.041229248046875, -0.0352783203125, 0.0012636184692382812, 0.01467132568359375, -0.0104217529296875, 0.01238250732421875, 0.0158843994140625, -0.01555633544921875, 0.032989501953125, 0.0301666259765625, -0.04095458984375, -0.05047607421875, -0.02978515625, ...
lokpalai/lokpalgpt-falcon-7b-lora-4.5
2023-07-18T13:32:44.000Z
[ "transformers", "pytorch", "RefinedWebModel", "text-generation", "custom_code", "en", "license:cc-by-4.0", "text-generation-inference", "region:us" ]
text-generation
lokpalai
null
null
lokpalai/lokpalgpt-falcon-7b-lora-4.5
0
2
transformers
2023-07-18T12:46:47
--- language: - en inference: true widget: - text: "What are the duties of the President of India as per the Constitution?" example_title: "Duties of President" - text: "Can you analyze the legal implications of the Ayodhya Verdict by the Supreme Court of India?" example_title: "Implications of Ayodhya Verdict" - text: "Can you summarize the main provisions of the Hindu Succession Act, 1956?" example_title: "Diving Top 10" - text: "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\nDevelop a legal strategy for a client based on the facts of the provided case.\n\n### Input:\nThe client in question is a government company that terminated the services of a permanent employee without providing any justification. The termination was carried out by invoking a rule similar to Rule 9(i) in the Central Inland Water Transport Corporation Ltd. vs Brojo Nath Ganguly & Anr. case. The employee who was terminated has taken legal action by challenging both the termination order and the validity of the rule in the High Court under Article 226.\n\n### Response:\n" example_title: "Create Legal Strategy 1" - text: "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction: \nDevelop a legal strategy for a hypothetical client based on the facts of the provided case.\n\n### Input:\nThe individual seeking assistance is a research scientist employed at a government-funded research institute, comparable to CSIR. They have been unjustly dismissed from their position and seek to contest the termination through legal means. The individual contends that the institute, being government-funded, qualifies as a 'State' as per Article 12 of the Constitution. Consequently, they believe they should have the right to file a writ petition against the institute.\n\n### Response:\n" example_title: "Create Legal Strategy 2" - text: "What is DV act ?" example_title: "Understand Act" license: cc-by-4.0 --- # LokPalAI: Bridging the Gap to Legal Empowerment LokPalAI is an advanced language model finetuned for Indian scenarios, specifically designed to bridge the gap between individuals and legal empowerment. With LokPalAI, users can interact with a powerful query box to seek information and guidance related to Indian law. ## Features: 1. Interact with LokPalAI’s Query Box: LokPalAI provides a user-friendly query box interface where users can input their legal queries and receive accurate and relevant responses. Whether you need information about a specific law, legal procedure, or any other legal matter, LokPalAI is here to assist you. 2. Enhanced with Rail Guards: To ensure the accuracy and reliability of the information provided, LokPalAI incorporates rail guards. These safeguards help prevent the generation of misleading or incorrect legal advice. We understand the importance of reliable legal information, and our rail guards are designed to maintain the highest standards of accuracy. 3. Real-Time Responses using RAG: LokPalAI leverages the Retrieve and Generate (RAG) framework to provide real-time responses to your legal queries. RAG combines the power of retrieval-based models with generation-based models, ensuring that the information provided is both contextually relevant and up to date. 4. 
Thorough Testing and Maintenance: We understand the criticality of maintaining a reliable and accurate legal information system. LokPalAI undergoes extensive testing to ensure its performance and reliability. We continuously monitor and update the model to account for changes in Indian law, ensuring that the information provided is always accurate and up to date. # ✨ LokpalGPT-Instruct-Falcon-7b ## Dataset The dataset is being curated and created using judgements available on IndianKanoon.com. You can refer to the whole process here. Soon, we will be releasing our dataset and the training process. ## How to Use for Inference? 💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!** For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blog post](https://huggingface.co/blog/falcon). You will need **at least 16GB of memory** to swiftly run inference with LokpalGPT-Instruct-Falcon-7b. ```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch

model = "lokpalai/lokpalgpt-falcon-7b-lora-4.5"
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
sequences = pipeline(
    "Can you analyze the legal implications of the Ayodhya Verdict by the Supreme Court of India?",
    max_length=200,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    temperature=0.5,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```
5,275
[ [ -0.0011644363403320312, -0.049468994140625, 0.0182952880859375, 0.02587890625, -0.0296478271484375, 0.0010023117065429688, 0.00725555419921875, -0.0267486572265625, 0.017242431640625, 0.0244598388671875, -0.026885986328125, -0.0166015625, -0.047393798828125, ...
msladic/a2c-PandaReachDense-v2
2023-07-18T13:13:38.000Z
[ "stable-baselines3", "PandaReachDense-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
msladic
null
null
msladic/a2c-PandaReachDense-v2
0
2
stable-baselines3
2023-07-18T13:04:25
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.77 +/- 0.61 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
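A hedged sketch for this card's TODO section: loading the A2C policy and rolling it out. It assumes the checkpoint is named `a2c-PandaReachDense-v2.zip` (course convention) and that `panda-gym` v2, which still uses the classic `gym` API, is installed.

```python
import gym
import panda_gym  # registers PandaReachDense-v2 on import
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Checkpoint filename is an assumption, not confirmed by the card.
checkpoint = load_from_hub("msladic/a2c-PandaReachDense-v2", "a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)

env = gym.make("PandaReachDense-v2")
obs = env.reset()
for _ in range(200):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()
```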
802
[ [ -0.019744873046875, -0.04742431640625, -0.004787445068359375, 0.0469970703125, -0.00018846988677978516, -0.006023406982421875, 0.033172607421875, -0.0249481201171875, 0.028045654296875, 0.042694091796875, -0.06256103515625, -0.0289764404296875, -0.03277587890625...
kyars/ppo-LunarLander-v2
2023-07-18T13:29:02.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
kyars
null
null
kyars/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-18T13:28:42
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 246.31 +/- 78.16 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
tztztztztz/mlpppo-lunarLander-V2
2023-07-18T14:08:17.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
tztztztztz
null
null
tztztztztz/mlpppo-lunarLander-V2
0
2
stable-baselines3
2023-07-18T14:07:59
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo_mlp results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.69 +/- 25.49 name: mean_reward verified: false --- # **ppo_mlp** Agent playing **LunarLander-v2** This is a trained model of a **ppo_mlp** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
796
[ [ -0.004520416259765625, -0.0259552001953125, 0.0148773193359375, 0.0271148681640625, 0.002899169921875, -0.00041174888610839844, 0.0301361083984375, -0.013763427734375, 0.0211029052734375, 0.064208984375, -0.045928955078125, -0.034576416015625, -0.038421630859375...
Oslaw/a2c-AntBulletEnv-v0
2023-07-18T14:21:44.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Oslaw
null
null
Oslaw/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-18T14:19:00
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1351.27 +/- 262.90 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
791
[ [ -0.02679443359375, -0.04443359375, 0.0106964111328125, 0.0208892822265625, -0.0034961700439453125, 0.0018033981323242188, 0.0187530517578125, -0.0176544189453125, 0.0193939208984375, 0.0265655517578125, -0.052642822265625, -0.037506103515625, -0.04425048828125, ...
veluchs/speecht5_finetuned_voxpopuli_it_partial
2023-07-24T08:39:55.000Z
[ "transformers", "pytorch", "tensorboard", "speecht5", "text-to-audio", "generated_from_trainer", "text-to-speech", "dataset:voxpopuli", "license:mit", "endpoints_compatible", "region:us" ]
text-to-speech
veluchs
null
null
veluchs/speecht5_finetuned_voxpopuli_it_partial
0
2
transformers
2023-07-18T15:31:58
--- license: mit tags: - generated_from_trainer datasets: - voxpopuli model-index: - name: speecht5_finetuned_voxpopuli_it_partial results: [] pipeline_tag: text-to-speech --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # speecht5_finetuned_voxpopuli_it_partial This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on a part of the Italian VoxPopuli dataset. It was fine-tuned as part of the HF Audio Course. It achieves the following results on the evaluation set: - Loss: 0.4955 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 3000 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.5461 | 17.39 | 1000 | 0.5055 | | 0.5222 | 34.78 | 2000 | 0.4958 | | 0.5125 | 52.17 | 3000 | 0.4955 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
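The card lists training details but no inference snippet. A hedged sketch, following the standard SpeechT5 pattern from the transformers docs; the CMU Arctic x-vector used as the speaker embedding is an assumption borrowed from those docs, not from this card.

```python
import torch
import soundfile as sf
from datasets import load_dataset
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

repo = "veluchs/speecht5_finetuned_voxpopuli_it_partial"
processor = SpeechT5Processor.from_pretrained(repo)
model = SpeechT5ForTextToSpeech.from_pretrained(repo)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Buongiorno, come stai?", return_tensors="pt")

# Speaker embedding: an arbitrary x-vector from CMU Arctic (assumption).
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("speech.wav", speech.numpy(), samplerate=16000)
```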
1,626
[ [ -0.034088134765625, -0.0428466796875, 0.000023066997528076172, 0.007049560546875, -0.0189056396484375, -0.0232086181640625, -0.0159759521484375, -0.012451171875, -0.005077362060546875, 0.0179595947265625, -0.051513671875, -0.04931640625, -0.039794921875, -0....
YojitShinde/a2c-AntBulletEnv-v0
2023-07-20T13:40:41.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
YojitShinde
null
null
YojitShinde/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-18T16:05:32
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 2083.21 +/- 52.79 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
790
[ [ -0.02679443359375, -0.04443359375, 0.0106964111328125, 0.0208892822265625, -0.0034961700439453125, 0.0018033981323242188, 0.0187530517578125, -0.0176544189453125, 0.0193939208984375, 0.0265655517578125, -0.052642822265625, -0.037506103515625, -0.04425048828125, ...
Jyotiyadav/NER-bert-base-multilingual-uncased
2023-07-18T16:41:47.000Z
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
Jyotiyadav
null
null
Jyotiyadav/NER-bert-base-multilingual-uncased
0
2
transformers
2023-07-18T16:39:38
This model is trained on bert-base-uncased. Dataset: https://drive.google.com/file/d/1hyXTTubD9CRjL1MBSIU_iVxFCdtGXqgB/view?usp=sharing Notebook: https://colab.research.google.com/drive/1zHrs3hosTXBPiy0P1O-x6Z2EQ9RZGUsQ?usp=sharing

| Label | Precision | Recall | F1-Score | Support |
|--------------------|-----------|--------|----------|---------|
| commodity | 0.50 | 0.50 | 0.50 | 66 |
| company | 0.79 | 0.87 | 0.83 | 164 |
| delivery_cap | 0.00 | 0.00 | 0.00 | 10 |
| delivery_location | 0.58 | 0.27 | 0.37 | 26 |
| delivery_port | 0.88 | 0.90 | 0.89 | 332 |
| delivery_state | 0.80 | 0.82 | 0.81 | 45 |
| incoterms | 0.88 | 0.97 | 0.92 | 187 |
| measures | 0.91 | 0.96 | 0.94 | 802 |
| package_type | 0.89 | 0.96 | 0.92 | 292 |
| pickup_cap | 0.83 | 0.97 | 0.90 | 139 |
| pickup_location | 0.77 | 0.92 | 0.84 | 356 |
| pickup_port | 0.00 | 0.00 | 0.00 | 3 |
| pickup_state | 0.83 | 0.82 | 0.83 | 67 |
| quantity | 0.93 | 0.90 | 0.92 | 199 |
| stackable | 0.91 | 0.84 | 0.87 | 57 |
| volume | 0.46 | 0.67 | 0.55 | 69 |
| weight | 0.81 | 0.83 | 0.82 | 247 |
| micro avg | 0.84 | 0.90 | 0.87 | 3061 |
| macro avg | 0.69 | 0.72 | 0.70 | 3061 |
| weighted avg | 0.84 | 0.90 | 0.87 | 3061 |
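A hedged usage sketch for this tagger; the sample sentence is invented to exercise the freight-related labels from the table above, and the exact outputs depend on the model.

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="Jyotiyadav/NER-bert-base-multilingual-uncased",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)

# Hypothetical shipping request matching the label set above.
text = "Pickup 10 pallets of machinery from Milano 20121, delivery to the port of Genova, incoterms EXW."
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 2))
```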
1,756
[ [ -0.0207366943359375, -0.022216796875, 0.005435943603515625, 0.0193939208984375, -0.0172882080078125, -0.01593017578125, 0.0002582073211669922, -0.0155029296875, 0.02362060546875, 0.0195159912109375, -0.050506591796875, -0.051544189453125, -0.04547119140625, ...
TitanML/ct2-int8-open-llama-7b
2023-07-26T16:15:48.000Z
[ "transformers", "llama", "text-generation", "dataset:togethercomputer/RedPajama-Data-1T", "license:apache-2.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
TitanML
null
null
TitanML/ct2-int8-open-llama-7b
0
2
transformers
2023-07-18T20:25:11
--- license: apache-2.0 datasets: - togethercomputer/RedPajama-Data-1T --- # OpenLLaMA: An Open Reproduction of LLaMA In this repo, we present a permissively licensed open source reproduction of Meta AI's [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) large language model. We are releasing a 7B and 3B model trained on 1T tokens, as well as the preview of a 13B model trained on 600B tokens. We provide PyTorch and JAX weights of pre-trained OpenLLaMA models, as well as evaluation results and comparison against the original LLaMA models. Please see the [project homepage of OpenLLaMA](https://github.com/openlm-research/open_llama) for more details. ## Weights Release, License and Usage We release the weights in two formats: an EasyLM format to be used with our [EasyLM framework](https://github.com/young-geng/EasyLM), and a PyTorch format to be used with the [Hugging Face transformers](https://huggingface.co/docs/transformers/index) library. Both our training framework EasyLM and the checkpoint weights are licensed permissively under the Apache 2.0 license. ### Loading the Weights with Hugging Face Transformers Preview checkpoints can be directly loaded from Hugging Face Hub. **Please note that it is advised to avoid using the Hugging Face fast tokenizer for now, as we've observed that the auto-converted fast tokenizer sometimes gives incorrect tokenizations.** This can be achieved by directly using the `LlamaTokenizer` class, or passing in the `use_fast=False` option for the `AutoTokenizer` class. See the following example for usage. ```python
import torch
from transformers import LlamaTokenizer, LlamaForCausalLM

model_path = 'openlm-research/open_llama_3b'
# model_path = 'openlm-research/open_llama_7b'

tokenizer = LlamaTokenizer.from_pretrained(model_path)
model = LlamaForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map='auto',
)

prompt = 'Q: What is the largest animal?\nA:'
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

generation_output = model.generate(
    input_ids=input_ids, max_new_tokens=32
)
print(tokenizer.decode(generation_output[0]))
``` For more advanced usage, please follow the [transformers LLaMA documentation](https://huggingface.co/docs/transformers/main/model_doc/llama). ### Evaluating with LM-Eval-Harness The model can be evaluated with [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness). However, due to the aforementioned tokenizer issue, we need to avoid using the fast tokenizer to obtain the correct results. This can be achieved by passing in `use_fast=False` to [this part of lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness/blob/4b701e228768052cfae9043dca13e82052ca5eea/lm_eval/models/huggingface.py#LL313C9-L316C10), as shown in the example below: ```python
tokenizer = self.AUTO_TOKENIZER_CLASS.from_pretrained(
    pretrained if tokenizer is None else tokenizer,
    revision=revision + ("/" + subfolder if subfolder is not None else ""),
    use_fast=False
)
``` ### Loading the Weights with EasyLM For using the weights in our EasyLM framework, please refer to the [LLaMA documentation of EasyLM](https://github.com/young-geng/EasyLM/blob/main/docs/llama.md). Note that unlike the original LLaMA model, our OpenLLaMA tokenizer and weights are trained completely from scratch, so it is no longer necessary to obtain the original LLaMA tokenizer and weights.
Note that we use the BOS (beginning of sentence) token (id=1) during training, so it is best to prepend this token for best performance during few-shot evaluation. ## Dataset and Training We train our models on the [RedPajama](https://www.together.xyz/blog/redpajama) dataset released by [Together](https://www.together.xyz/), which is a reproduction of the LLaMA training dataset containing over 1.2 trillion tokens. We follow exactly the same preprocessing steps and training hyperparameters as the original LLaMA paper, including model architecture, context length, training steps, learning rate schedule, and optimizer. The only difference between our setting and the original one is the dataset used: OpenLLaMA employs the RedPajama dataset rather than the one utilized by the original LLaMA. We train the models on cloud TPU-v4s using [EasyLM](https://github.com/young-geng/EasyLM), a JAX-based training pipeline we developed for training and fine-tuning large language models. We employ a combination of normal data parallelism and [fully sharded data parallelism (also known as ZeRO stage 3)](https://engineering.fb.com/2021/07/15/open-source/fsdp/) to balance the training throughput and memory usage. Overall we reach a throughput of over 2200 tokens / second / TPU-v4 chip for our 7B model. ## Evaluation We evaluated OpenLLaMA on a wide range of tasks using [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). The LLaMA results are generated by running the original LLaMA model on the same evaluation metrics. We note that our results for the LLaMA model differ slightly from the original LLaMA paper, which we believe is a result of different evaluation protocols. Similar differences have been reported in [this issue of lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/issues/443). Additionally, we present the results of GPT-J, a 6B parameter model trained on the [Pile](https://pile.eleuther.ai/) dataset by [EleutherAI](https://www.eleuther.ai/). The original LLaMA model was trained for 1 trillion tokens and GPT-J was trained for 500 billion tokens. We present the results in the table below. OpenLLaMA exhibits comparable performance to the original LLaMA and GPT-J across a majority of tasks, and outperforms them in some tasks.
| **Task/Metric** | GPT-J 6B | LLaMA 7B | OpenLLaMA 7B | OpenLLaMA 3B | OpenLLaMA 13B 600BT |
| ---------------------- | -------- | -------- | ------------ | ------------ | ------------------- |
| anli_r1/acc | 0.32 | 0.35 | 0.33 | 0.33 | 0.33 |
| anli_r2/acc | 0.34 | 0.34 | 0.36 | 0.32 | 0.35 |
| anli_r3/acc | 0.35 | 0.37 | 0.38 | 0.35 | 0.38 |
| arc_challenge/acc | 0.34 | 0.39 | 0.37 | 0.34 | 0.39 |
| arc_challenge/acc_norm | 0.37 | 0.41 | 0.38 | 0.37 | 0.42 |
| arc_easy/acc | 0.67 | 0.68 | 0.72 | 0.69 | 0.74 |
| arc_easy/acc_norm | 0.62 | 0.52 | 0.68 | 0.65 | 0.70 |
| ddboolq/acc | 0.50 | 0.56 | 0.53 | 0.49 | 0.71 |
| hellaswag/acc | 0.36 | 0.36 | 0.63 | 0.43 | 0.54 |
| hellaswag/acc_norm | 0.66 | 0.73 | 0.72 | 0.67 | 0.73 |
| openbookqa/acc | 0.29 | 0.29 | 0.30 | 0.27 | 0.30 |
| openbookqa/acc_norm | 0.38 | 0.41 | 0.40 | 0.40 | 0.41 |
| piqa/acc | 0.75 | 0.78 | 0.76 | 0.75 | 0.77 |
| piqa/acc_norm | 0.76 | 0.78 | 0.77 | 0.76 | 0.78 |
| record/em | 0.88 | 0.91 | 0.89 | 0.88 | 0.90 |
| record/f1 | 0.89 | 0.91 | 0.90 | 0.89 | 0.90 |
| rte/acc | 0.54 | 0.56 | 0.60 | 0.58 | 0.65 |
| truthfulqa_mc/mc1 | 0.20 | 0.21 | 0.23 | 0.22 | 0.22 |
| truthfulqa_mc/mc2 | 0.36 | 0.34 | 0.35 | 0.35 | 0.35 |
| wic/acc | 0.50 | 0.50 | 0.51 | 0.48 | 0.49 |
| winogrande/acc | 0.64 | 0.68 | 0.67 | 0.62 | 0.67 |
| Average | 0.51 | 0.53 | 0.55 | 0.52 | 0.56 |

We removed the tasks CB and WSC from our benchmark, as our model performs suspiciously well on these two tasks. We hypothesize that there could be benchmark data contamination in the training set. ## Contact We would love to get feedback from the community. If you have any questions, please open an issue or contact us. OpenLLaMA is developed by: [Xinyang Geng](https://young-geng.xyz/)* and [Hao Liu](https://www.haoliu.site/)* from Berkeley AI Research. *Equal Contribution ## Acknowledgment We thank the [Google TPU Research Cloud](https://sites.research.google/trc/about/) program for providing part of the computation resources. We'd like to specially thank Jonathan Caton from TPU Research Cloud for helping us organize compute resources, Rafi Witten from the Google Cloud team and James Bradbury from the Google JAX team for helping us optimize our training throughput. We'd also like to thank Charlie Snell, Gautier Izacard, Eric Wallace, Lianmin Zheng and our user community for the discussions and feedback. The OpenLLaMA 13B model is trained in collaboration with [Stability AI](https://stability.ai/), and we thank Stability AI for providing the computation resources. We'd like to especially thank David Ha and Shivanshu Purohit for coordinating the logistics and providing engineering support.
## Reference If you found OpenLLaMA useful in your research or applications, please cite using the following BibTeX: ``` @software{openlm2023openllama, author = {Geng, Xinyang and Liu, Hao}, title = {OpenLLaMA: An Open Reproduction of LLaMA}, month = May, year = 2023, url = {https://github.com/openlm-research/open_llama} } ``` ``` @software{together2023redpajama, author = {Together Computer}, title = {RedPajama-Data: An Open Source Recipe to Reproduce LLaMA training dataset}, month = April, year = 2023, url = {https://github.com/togethercomputer/RedPajama-Data} } ``` ``` @article{touvron2023llama, title={Llama: Open and efficient foundation language models}, author={Touvron, Hugo and Lavril, Thibaut and Izacard, Gautier and Martinet, Xavier and Lachaux, Marie-Anne and Lacroix, Timoth{\'e}e and Rozi{\`e}re, Baptiste and Goyal, Naman and Hambro, Eric and Azhar, Faisal and others}, journal={arXiv preprint arXiv:2302.13971}, year={2023} } ```
10,507
[ [ -0.0233001708984375, -0.0538330078125, 0.01861572265625, 0.030181884765625, -0.018707275390625, -0.0037136077880859375, -0.0243682861328125, -0.044158935546875, 0.0280914306640625, 0.0193023681640625, -0.029937744140625, -0.05059814453125, -0.04962158203125, ...
hector981/ppo-LunarLander-v2
2023-07-18T21:32:44.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
hector981
null
null
hector981/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-18T21:32:20
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 236.70 +/- 24.97 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
acdg1214/ppo-LunarLander-v2
2023-07-18T21:46:09.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
acdg1214
null
null
acdg1214/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-18T21:45:48
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 249.77 +/- 14.25 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
neuromax/LunarLander-v2-rl-unit-1
2023-07-18T21:51:04.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
neuromax
null
null
neuromax/LunarLander-v2-rl-unit-1
0
2
stable-baselines3
2023-07-18T21:50:41
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 237.06 +/- 28.50 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
aroot/eng-deu-tok_budget_random
2023-07-18T22:25:59.000Z
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
aroot
null
null
aroot/eng-deu-tok_budget_random
0
2
transformers
2023-07-18T22:12:17
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-deu-tok_budget_random results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-deu-tok_budget_random This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on an unspecified dataset. It achieves the following results on the evaluation set: - Loss: 1.6856 - Bleu: 20.4422 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
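The card omits a usage example. A hedged sketch for English-to-German inference, assuming the checkpoint keeps the mBART-50 language-code conventions (`en_XX`, `de_DE`) of its base model:

```python
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

model = MBartForConditionalGeneration.from_pretrained("aroot/eng-deu-tok_budget_random")
tokenizer = MBart50TokenizerFast.from_pretrained("aroot/eng-deu-tok_budget_random")

tokenizer.src_lang = "en_XX"  # source language code (assumed from the base model)
inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["de_DE"],  # force German output
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```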
1,228
[ [ -0.038360595703125, -0.0511474609375, 0.019866943359375, 0.0182647705078125, -0.026947021484375, -0.034027099609375, -0.01560211181640625, -0.00891876220703125, 0.0167694091796875, 0.0292510986328125, -0.06524658203125, -0.034393310546875, -0.04302978515625, ...
Falcinspire/ppo-LunarLander-v2
2023-07-18T22:39:33.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Falcinspire
null
null
Falcinspire/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-18T22:13:05
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 282.08 +/- 17.86 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
giocs2017/ppo-PyramisTraining
2023-07-19T00:03:03.000Z
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
giocs2017
null
null
giocs2017/ppo-PyramisTraining
0
2
ml-agents
2023-07-19T00:02:57
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial on training your first agent using ML-Agents and publishing it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the ML-Agents official environments, go to https://huggingface.co/unity 2. Find your model_id: giocs2017/ppo-PyramisTraining 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
1,341
[ [ -0.037445068359375, -0.03521728515625, 0.0030422210693359375, 0.01329803466796875, -0.0110321044921875, 0.01413726806640625, 0.016387939453125, -0.01494598388671875, 0.0347900390625, 0.02984619140625, -0.0396728515625, -0.048736572265625, -0.0291595458984375, ...
jrad98/ppo-LunarLander-v2
2023-07-19T00:13:30.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
jrad98
null
null
jrad98/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T00:13:11
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 262.28 +/- 23.13 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00021386146545410156, -0.027130126953125, 0.0170745849609375, 0.023345947265625, -0.0060577392578125, 0.00275421142578125, 0.034454345703125, -0.012115478515625, 0.0198516845703125, 0.06500244140625, -0.04315185546875, -0.03521728515625, -0.0343017578125, ...
jrad98/ppo-LunarLander-v2_1
2023-07-19T00:27:16.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
jrad98
null
null
jrad98/ppo-LunarLander-v2_1
0
2
stable-baselines3
2023-07-19T00:26:55
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -128.10 +/- 31.63 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
785
[ [ -0.00020432472229003906, -0.0271453857421875, 0.0170745849609375, 0.0233612060546875, -0.00604248046875, 0.002777099609375, 0.034454345703125, -0.01215362548828125, 0.0199127197265625, 0.06500244140625, -0.043121337890625, -0.035247802734375, -0.034332275390625,...
BAAI/AquilaCode-multi
2023-07-24T00:47:10.000Z
[ "transformers", "pytorch", "aquila", "custom_code", "license:other", "endpoints_compatible", "region:us" ]
null
BAAI
null
null
BAAI/AquilaCode-multi
3
2
transformers
2023-07-19T01:31:24
--- license: other --- ![Aquila_logo](./log.jpeg) <h4 align="center"> <p> <b>English</b> | <a href="https://huggingface.co/BAAI/AquilaCode-multi/blob/main/README_zh.md">简体中文</a> </p> </h4> Aquila Language Model is the first open source language model that supports both Chinese and English knowledge, commercial license agreements, and compliance with domestic data regulations. - 🌟 **Supports open source commercial licenses**. The source code of the Aquila series models is based on the [Apache 2.0 agreement](https://www.apache.org/licenses/LICENSE-2.0), while the model weights are based on the [BAAI Aquila Model License Agreement](https://huggingface.co/BAAI/AquilaCode-multi/blob/main/BAAI%20Aquila%20Model%20License%20Agreement.pdf). Users can use it for commercial purposes as long as they meet the licensing restrictions. - ✍️ **Possesses Chinese and English knowledge**. The Aquila series model is trained from scratch on a high-quality corpus of Chinese and English languages, with Chinese corpora accounting for about 40%, ensuring that the model accumulates native Chinese world knowledge during the pre-training phase, rather than translated knowledge. - 👮‍♀️ **Complies with domestic data regulations**. The Chinese corpora of the Aquila series models come from Intelligence Source's accumulated Chinese datasets over the years, including Chinese internet data from over 10,000 sources (more than 99% of which are domestic sources), as well as high-quality Chinese literature and book data supported by authoritative domestic organizations. We will continue to accumulate high-quality and diverse datasets and incorporate them into the subsequent training of the Aquila base models. - 🎯 **Continuous improvements and open sourcing**. We will continue to improve training data, optimize training methods, and enhance model performance, cultivate a flourishing "model tree" on a better base model foundation, and continuously update open-source versions. The additional details of the Aquila model will be presented in the official technical report. Please stay tuned for updates on official channels, including the [FlagAI GitHub repository](https://github.com/FlagAI-Open/FlagAI/), [FlagAI's Zhihu account](https://www.zhihu.com/people/95-22-20-18) and [FlagAI's official technical communication group](https://github.com/FlagAI-Open/FlagAI/blob/master/wechat-qrcode.jpg). | Model | Model Type | Description | Status | GPUs Used | | :--- | :--- | :--- | :--- | :--- | | Aquila-7B | Base model, 7 billion parameters | **Aquila Base Model** inherits the architectural design advantages of GPT-3 and LLaMA. It swaps in a batch of more efficient underlying operator implementations, redesigns the implementation of the bilingual tokenizer, upgrades the BMTrain parallel training method, and achieves nearly 8 times the training efficiency of Megatron+DeepSpeed ZeRO-2.
| Released | Nvidia-A100 | | Aquila-33B | Base model, 33 billion parameters | Same as above | Coming soon | Nvidia-A100 | | AquilaChat-7B | SFT model, fine-tuned and RL based on Aquila-7B | **AquilaChat Dialog Model** supports fluent text dialogue and multiple language generation tasks, and enables AquilaChat to call other models and tools by defining an expandable special instruction specification, which is easy to extend. For example, calling Flagship Intelligence's open source **[AltDiffusion](https://github.com/FlagAI-Open/FlagAI/tree/master/examples/AltDiffusion-m18) multimodal language image generation model** achieved smooth image generation capability. Together with Flagship Intelligence's **InstructFace multi-step controllable text-picture model**, it is easy to achieve multi-step controllable editing of human face images. | Released | Nvidia-A100 | | AquilaChat-33B | SFT model, fine-tuned and RL based on Aquila-33B | Same as above | Coming soon | Nvidia-A100 | | AquilaCode-multi | Base model, "text-code" generation model, continue-pre-trained based on Aquila-7B. | AquilaCode utilizes high-quality, filtered, and compliant open-source code data for training, with a dataset approximately 10-40% of the size used by other open-source code generation models. By following the provided official guidelines, developers can harness the power of the AquilaCode model to customize their own code assistant. | Released | Nvidia-A100 | | AquilaCode-py | Base model, "text-code" generation model, continue-pre-trained based on Aquila-7B, trained on Horizon Robotics chips | Same as above | Released | Nvidia-A100 | We will continue to release improved versions of the Aquila model as open source. - 2023/07/24: release v0.9 - AquilaCode-mutil-01 md5: e6ea49fea7a737ffe41086ec7019cebb - AquilaCode-mutil-02 md5: 4bba98eac44d785358ed5b6d2144a94a - AquilaCode-Python-01 md5: e202e5b82db773ea369fe843fef1c34c - AquilaCode-Python-02 md5: 3923b2b020e2af71755b11248076437f Aquila-7B v0.8 has shown improvements in the FlagEval large model evaluation ("Objective") compared to version 0.7. It achieved improvements of approximately 10.07% on MMLU_Chinese, 14.84% on TruthfulQA, and 7.94% on MMLU datasets. For detailed evaluation results, please refer to the website http://flageval.baai.ac.cn. For the detailed version change history, see [Change Log](https://huggingface.co/BAAI/Aquila-7B/blob/main/change_log.log). ## Quick Start Aquila-7B ### 1. Inference ```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_info = "BAAI/AquilaCode-multi"
tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
model.eval()
model.to("cuda:3")

text = "#补全代码\ndef quick_sort(x):"
tokens = tokenizer.encode_plus(text)['input_ids'][:-1]
tokens = torch.tensor(tokens)[None,].to("cuda:3")

with torch.no_grad():
    out = model.generate(tokens, do_sample=True, max_length=512, eos_token_id=100007)[0]
out = tokenizer.decode(out.cpu().numpy().tolist())
print(out)
``` ## License The Aquila-7B and AquilaChat-33B open-source models are licensed under the [BAAI Aquila Model Licence Agreement](https://huggingface.co/BAAI/AquilaCode-multi/blob/main/BAAI%20Aquila%20Model%20License%20Agreement.pdf)
8,010
[ [ -0.03045654296875, -0.05133056640625, 0.01473236083984375, 0.022735595703125, -0.0133514404296875, -0.0005326271057128906, -0.01105499267578125, -0.038909912109375, 0.0003979206085205078, 0.0228118896484375, -0.043914794921875, -0.026458740234375, -0.02697753906...
BAAI/AquilaCode-py
2023-07-24T00:47:26.000Z
[ "transformers", "pytorch", "aquila", "text-generation", "license:other", "endpoints_compatible", "region:us" ]
text-generation
BAAI
null
null
BAAI/AquilaCode-py
2
2
transformers
2023-07-19T01:37:02
--- license: other --- ![Aquila_logo](./log.jpeg) <h4 align="center"> <p> <b>English</b> | <a href="https://huggingface.co/BAAI/AquilaCode-py/blob/main/README_zh.md">简体中文</a> </p> </h4> Aquila Language Model is the first open source language model that supports both Chinese and English knowledge, commercial license agreements, and compliance with domestic data regulations. - 🌟 **Supports open source commercial licenses**. The source code of the Aquila series models is based on the [Apache 2.0 agreement](https://www.apache.org/licenses/LICENSE-2.0), while the model weights are based on the [BAAI Aquila Model License Agreement](https://huggingface.co/BAAI/AquilaCode-py/blob/main/BAAI%20Aquila%20Model%20License%20Agreement.pdf). Users can use it for commercial purposes as long as they meet the licensing restrictions. - ✍️ **Possesses Chinese and English knowledge**. The Aquila series model is trained from scratch on a high-quality corpus of Chinese and English languages, with Chinese corpora accounting for about 40%, ensuring that the model accumulates native Chinese world knowledge during the pre-training phase, rather than translated knowledge. - 👮‍♀️ **Complies with domestic data regulations**. The Chinese corpora of the Aquila series models come from Intelligence Source's accumulated Chinese datasets over the years, including Chinese internet data from over 10,000 sources (more than 99% of which are domestic sources), as well as high-quality Chinese literature and book data supported by authoritative domestic organizations. We will continue to accumulate high-quality and diverse datasets and incorporate them into the subsequent training of the Aquila base models. - 🎯 **Continuous improvements and open sourcing**. We will continue to improve training data, optimize training methods, and enhance model performance, cultivate a flourishing "model tree" on a better base model foundation, and continuously update open-source versions. The additional details of the Aquila model will be presented in the official technical report. Please stay tuned for updates on official channels, including the [FlagAI GitHub repository](https://github.com/FlagAI-Open/FlagAI/), [FlagAI's Zhihu account](https://www.zhihu.com/people/95-22-20-18) and [FlagAI's official technical communication group](https://github.com/FlagAI-Open/FlagAI/blob/master/wechat-qrcode.jpg). | Model | Model Type | Description | Status | GPUs Used | | :--- | :--- | :--- | :--- | :--- | | Aquila-7B | Base model, 7 billion parameters | **Aquila Base Model** inherits the architectural design advantages of GPT-3 and LLaMA. It swaps in a batch of more efficient underlying operator implementations, redesigns the implementation of the bilingual tokenizer, upgrades the BMTrain parallel training method, and achieves nearly 8 times the training efficiency of Megatron+DeepSpeed ZeRO-2.
| Released | Nvidia-A100 | | Aquila-33B | Base model, 33 billion parameters | Same as above | Coming soon | Nvidia-A100 | | AquilaChat-7B | SFT model, fine-tuned and RL based on Aquila-7B | **AquilaChat Dialog Model** supports fluent text dialogue and multiple language generation tasks, and enables AquilaChat to call other models and tools by defining an expandable special instruction specification, which is easy to extend. For example, calling Flagship Intelligence's open source **[AltDiffusion](https://github.com/FlagAI-Open/FlagAI/tree/master/examples/AltDiffusion-m18) multimodal language image generation model** achieved smooth image generation capability. Together with Flagship Intelligence's **InstructFace multi-step controllable text-picture model**, it is easy to achieve multi-step controllable editing of human face images. | Released | Nvidia-A100 | | AquilaChat-33B | SFT model, fine-tuned and RL based on Aquila-33B | Same as above | Coming soon | Nvidia-A100 | | AquilaCode-multi | Base model, "text-code" generation model, continue-pre-trained based on Aquila-7B. | AquilaCode utilizes high-quality, filtered, and compliant open-source code data for training, with a dataset approximately 10-40% of the size used by other open-source code generation models. By following the provided official guidelines, developers can harness the power of the AquilaCode model to customize their own code assistant. | Released | Nvidia-A100 | | AquilaCode-py | Base model, "text-code" generation model, continue-pre-trained based on Aquila-7B, trained on Horizon Robotics chips | Same as above | Released | Nvidia-A100 | We will continue to release improved versions of the Aquila model as open source. - 2023/07/24: release v0.9 - AquilaCode-mutil-01 md5: e202e5b82db773ea369fe843fef1c34c - AquilaCode-mutil-02 md5: 3923b2b020e2af71755b11248076437f - AquilaCode-Python-01 md5: e202e5b82db773ea369fe843fef1c34c - AquilaCode-Python-02 md5: 3923b2b020e2af71755b11248076437f Aquila-7B v0.8 has shown improvements in the FlagEval large model evaluation ("Objective") compared to version 0.7. It achieved improvements of approximately 10.07% on MMLU_Chinese, 14.84% on TruthfulQA, and 7.94% on MMLU datasets. For detailed evaluation results, please refer to the website http://flageval.baai.ac.cn. For the detailed version change history, see [Change Log](https://huggingface.co/BAAI/Aquila-7B/blob/main/change_log.log). ## Quick Start Aquila-7B ### 1. Inference ```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_info = "BAAI/AquilaCode-py"
tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
model.eval()
model.to("cuda:4")

text = "#补全代码\ndef quick_sort(x):"
tokens = tokenizer.encode_plus(text)['input_ids'][:-1]
tokens = torch.tensor(tokens)[None,].to("cuda:4")

with torch.no_grad():
    out = model.generate(tokens, do_sample=True, max_length=512, eos_token_id=100007)[0]
out = tokenizer.decode(out.cpu().numpy().tolist())
print(out)
``` ## License The Aquila-7B and AquilaChat-33B open-source models are licensed under the [BAAI Aquila Model Licence Agreement](https://huggingface.co/BAAI/AquilaCode-py/blob/main/BAAI%20Aquila%20Model%20License%20Agreement.pdf)
7,999
[ [ -0.0290374755859375, -0.050872802734375, 0.014892578125, 0.0225067138671875, -0.01305389404296875, -0.00250244140625, -0.01142120361328125, -0.03826904296875, 0.00005507469177246094, 0.023223876953125, -0.044036865234375, -0.0268707275390625, -0.0266265869140625...
jamesx66/ppo-LunarLander-v2
2023-07-19T02:37:58.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
jamesx66
null
null
jamesx66/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T02:37:36
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 238.65 +/- 20.59 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
AltairXz/ppo-LunarLander-v2
2023-07-19T06:36:25.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
AltairXz
null
null
AltairXz/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T06:35:48
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -196.46 +/- 52.95 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
785
[ [ -0.00020432472229003906, -0.0271453857421875, 0.0170745849609375, 0.0233612060546875, -0.00604248046875, 0.002777099609375, 0.034454345703125, -0.01215362548828125, 0.0199127197265625, 0.06500244140625, -0.043121337890625, -0.035247802734375, -0.034332275390625,...
albertHu/temp-model
2023-07-19T06:46:39.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
albertHu
null
null
albertHu/temp-model
0
2
stable-baselines3
2023-07-19T06:46:17
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 173.21 +/- 35.99 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="albertHu/temp-model", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
marcoi/vit-base-patch16-224-finetuned-flower
2023-07-19T08:08:25.000Z
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "dataset:imagefolder", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
marcoi
null
null
marcoi/vit-base-patch16-224-finetuned-flower
0
2
transformers
2023-07-19T07:56:11
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.1+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
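The card above reports hyperparameters but no code; as a hedged illustration, this is roughly how those settings map onto `transformers` `TrainingArguments`. The output directory is an assumption, and the actual training script may have differed:

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters reported in the card; output_dir is an assumption.
training_args = TrainingArguments(
    output_dir="vit-base-patch16-224-finetuned-flower",
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="linear",  # linear schedule, as listed in the card
    num_train_epochs=5,          # Adam defaults match betas=(0.9, 0.999), eps=1e-8
)
```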
1,119
[ [ -0.0307769775390625, -0.06048583984375, 0.007091522216796875, 0.0204315185546875, -0.030059814453125, -0.038909912109375, -0.01384735107421875, -0.0225677490234375, 0.00707244873046875, 0.0233001708984375, -0.057830810546875, -0.036407470703125, -0.0434265136718...
Oslaw/a2c-PandaReachDense-v2
2023-07-19T08:13:17.000Z
[ "stable-baselines3", "PandaReachDense-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Oslaw
null
null
Oslaw/a2c-PandaReachDense-v2
0
2
stable-baselines3
2023-07-19T08:10:25
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.64 +/- 0.77 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="Oslaw/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip") model = A2C.load(checkpoint) ```
802
[ [ -0.019744873046875, -0.04742431640625, -0.004787445068359375, 0.0469970703125, -0.00018846988677978516, -0.006023406982421875, 0.033172607421875, -0.0249481201171875, 0.028045654296875, 0.042694091796875, -0.06256103515625, -0.0289764404296875, -0.03277587890625...
shayonhuggingface/videberta-sentiment-analysis
2023-07-19T16:44:46.000Z
[ "transformers", "pytorch", "tensorboard", "deberta-v2", "text-classification", "generated_from_trainer", "dataset:vietnamese_students_feedback", "model-index", "endpoints_compatible", "region:us" ]
text-classification
shayonhuggingface
null
null
shayonhuggingface/videberta-sentiment-analysis
0
2
transformers
2023-07-19T08:38:36
--- base_model: Fsoft-AIC/videberta-xsmall tags: - generated_from_trainer datasets: - vietnamese_students_feedback metrics: - accuracy - precision - recall - f1 model-index: - name: videberta-sentiment-analysis results: - task: name: Text Classification type: text-classification dataset: name: vietnamese_students_feedback type: vietnamese_students_feedback config: default split: validation args: default metrics: - name: Accuracy type: accuracy value: 0.9470198675496688 - name: Precision type: precision value: 0.9480840543881335 - name: Recall type: recall value: 0.9527950310559006 - name: F1 type: f1 value: 0.9504337050805451 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # videberta-sentiment-analysis This model is a fine-tuned version of [Fsoft-AIC/videberta-xsmall](https://huggingface.co/Fsoft-AIC/videberta-xsmall) on the vietnamese_students_feedback dataset. It achieves the following results on the evaluation set: - Loss: 0.2787 - Accuracy: 0.9470 - Precision: 0.9481 - Recall: 0.9528 - F1: 0.9504 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.6152 | 0.58 | 100 | 0.4777 | 0.8007 | 0.8580 | 0.7503 | 0.8005 | | 0.408 | 1.16 | 200 | 0.3241 | 0.8669 | 0.8943 | 0.8509 | 0.8721 | | 0.3268 | 1.74 | 300 | 0.2726 | 0.8954 | 0.8837 | 0.9255 | 0.9041 | | 0.2654 | 2.33 | 400 | 0.2296 | 0.9199 | 0.9212 | 0.9292 | 0.9252 | | 0.253 | 2.91 | 500 | 0.2088 | 0.9159 | 0.9206 | 0.9217 | 0.9212 | | 0.2014 | 3.49 | 600 | 0.2318 | 0.9172 | 0.9028 | 0.9466 | 0.9242 | | 0.1939 | 4.07 | 700 | 0.2131 | 0.9212 | 0.9224 | 0.9304 | 0.9264 | | 0.1698 | 4.65 | 800 | 0.2005 | 0.9311 | 0.9499 | 0.9193 | 0.9343 | | 0.1822 | 5.23 | 900 | 0.2249 | 0.9245 | 0.9089 | 0.9540 | 0.9309 | | 0.1441 | 5.81 | 1000 | 0.2038 | 0.9311 | 0.9311 | 0.9404 | 0.9357 | | 0.1403 | 6.4 | 1100 | 0.2044 | 0.9338 | 0.9315 | 0.9453 | 0.9383 | | 0.1377 | 6.98 | 1200 | 0.1991 | 0.9417 | 0.9567 | 0.9329 | 0.9447 | | 0.1191 | 7.56 | 1300 | 0.2955 | 0.9119 | 0.8792 | 0.9677 | 0.9213 | | 0.1227 | 8.14 | 1400 | 0.2362 | 0.9318 | 0.9199 | 0.9553 | 0.9372 | | 0.1023 | 8.72 | 1500 | 0.2221 | 0.9358 | 0.9286 | 0.9528 | 0.9405 | | 0.1049 | 9.3 | 1600 | 0.1940 | 0.9424 | 0.9454 | 0.9466 | 0.9460 | | 0.1002 | 9.88 | 1700 | 0.1949 | 0.9404 | 0.9649 | 0.9217 | 0.9428 | | 0.0946 | 10.47 | 1800 | 0.2232 | 0.9404 | 0.9625 | 0.9242 | 0.9430 | | 0.0911 | 11.05 | 1900 | 0.2016 | 0.9457 | 0.9641 | 0.9329 | 0.9482 | | 0.0818 | 11.63 | 2000 | 0.2636 | 0.9311 | 0.9128 | 0.9627 | 0.9371 | | 0.0889 | 12.21 | 2100 | 0.2279 | 0.9450 | 0.9524 | 0.9441 | 0.9482 | | 0.0668 | 12.79 | 2200 | 0.2460 | 0.9411 | 0.9409 | 0.9491 | 0.9450 | | 0.0635 | 13.37 | 2300 | 0.2764 | 0.9424 | 0.9465 | 0.9453 | 0.9459 | | 0.072 | 13.95 | 2400 | 0.2519 | 0.9437 | 0.9390 | 0.9565 | 0.9477 | | 0.0697 | 14.53 | 2500 | 0.2705 | 0.9404 | 0.9408 | 0.9478 | 0.9443 | | 
0.0602 | 15.12 | 2600 | 0.2686 | 0.9450 | 0.9513 | 0.9453 | 0.9483 | | 0.065 | 15.7 | 2700 | 0.2629 | 0.9450 | 0.9501 | 0.9466 | 0.9484 | | 0.0628 | 16.28 | 2800 | 0.2644 | 0.9450 | 0.9547 | 0.9416 | 0.9481 | | 0.0505 | 16.86 | 2900 | 0.2704 | 0.9424 | 0.9400 | 0.9528 | 0.9463 | | 0.0471 | 17.44 | 3000 | 0.2787 | 0.9470 | 0.9481 | 0.9528 | 0.9504 | | 0.0568 | 18.02 | 3100 | 0.2766 | 0.9450 | 0.9424 | 0.9553 | 0.9488 | | 0.0523 | 18.6 | 3200 | 0.2659 | 0.9424 | 0.9421 | 0.9503 | 0.9462 | | 0.0487 | 19.19 | 3300 | 0.3091 | 0.9338 | 0.9222 | 0.9565 | 0.9390 | | 0.0529 | 19.77 | 3400 | 0.3575 | 0.9272 | 0.9045 | 0.9652 | 0.9339 | | 0.0484 | 20.35 | 3500 | 0.3228 | 0.9358 | 0.9214 | 0.9615 | 0.9410 | | 0.0456 | 20.93 | 3600 | 0.2694 | 0.9437 | 0.9412 | 0.9540 | 0.9476 | | 0.0424 | 21.51 | 3700 | 0.2793 | 0.9404 | 0.9376 | 0.9516 | 0.9445 | | 0.045 | 22.09 | 3800 | 0.2953 | 0.9417 | 0.9356 | 0.9565 | 0.9459 | | 0.0395 | 22.67 | 3900 | 0.2840 | 0.9417 | 0.9377 | 0.9540 | 0.9458 | | 0.0418 | 23.26 | 4000 | 0.3527 | 0.9305 | 0.9108 | 0.9640 | 0.9366 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
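Since the card above documents metrics but no usage, here is a minimal inference sketch; the example sentence is illustrative, and the meaning of the predicted class index is an assumption (the card does not document the label mapping):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "shayonhuggingface/videberta-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Illustrative Vietnamese student-feedback sentence
inputs = tokenizer("Giảng viên dạy rất hay", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item(), logits.softmax(dim=-1))
```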
5,733
[ [ -0.04302978515625, -0.0352783203125, 0.0256195068359375, 0.01117706298828125, -0.0027866363525390625, -0.0030517578125, 0.0032291412353515625, -0.00007194280624389648, 0.046478271484375, 0.0286407470703125, -0.04437255859375, -0.0557861328125, -0.049560546875, ...
susnato/speecht5_finetuned_voxpopuli_nl
2023-07-19T10:58:48.000Z
[ "transformers", "pytorch", "tensorboard", "speecht5", "text-to-audio", "generated_from_trainer", "dataset:voxpopuli", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
text-to-audio
susnato
null
null
susnato/speecht5_finetuned_voxpopuli_nl
0
2
transformers
2023-07-19T08:42:38
--- license: mit base_model: microsoft/speecht5_tts tags: - generated_from_trainer datasets: - voxpopuli model-index: - name: speecht5_finetuned_voxpopuli_nl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # speecht5_finetuned_voxpopuli_nl This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the voxpopuli dataset. It achieves the following results on the evaluation set: - Loss: 0.4608 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.5263 | 4.3 | 1000 | 0.4794 | | 0.5015 | 8.6 | 2000 | 0.4671 | | 0.4927 | 12.9 | 3000 | 0.4624 | | 0.4854 | 17.2 | 4000 | 0.4608 | ### Framework versions - Transformers 4.32.0.dev0 - Pytorch 1.13.1 - Datasets 2.13.1 - Tokenizers 0.13.2
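For reference, SpeechT5 checkpoints like the one above are typically driven as follows; the speaker-embedding dataset and the Dutch example text are assumptions, not part of the card:

```python
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

model_id = "susnato/speecht5_finetuned_voxpopuli_nl"
processor = SpeechT5Processor.from_pretrained(model_id)
model = SpeechT5ForTextToSpeech.from_pretrained(model_id)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# SpeechT5 conditions on a speaker embedding; this xvector set is a common choice
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(xvectors[7306]["xvector"]).unsqueeze(0)

inputs = processor(text="hallo allemaal, welkom", return_tensors="pt")
speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
```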
1,599
[ [ -0.0322265625, -0.04022216796875, -0.005123138427734375, 0.00908660888671875, -0.0195770263671875, -0.0242919921875, -0.01444244384765625, -0.00939178466796875, -0.00875091552734375, 0.020843505859375, -0.047943115234375, -0.050628662109375, -0.042999267578125, ...
rafaym/LoRaModel
2023-07-21T13:27:17.000Z
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "lora", "license:creativeml-openrail-m", "region:us" ]
text-to-image
rafaym
null
null
rafaym/LoRaModel
0
2
diffusers
2023-07-19T10:49:31
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA text2image fine-tuning - rafaym/LoRaModel These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were fine-tuned on the rafaym/meri_tasweer dataset. Some example images are shown below; a minimal loading sketch follows this card. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
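The loading sketch referenced in the card above; the prompt is an assumption (the card does not state a trigger word), and `load_lora_weights` requires a reasonably recent `diffusers` release:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Apply the LoRA adaptation weights from this repo on top of the base model
pipe.load_lora_weights("rafaym/LoRaModel")

image = pipe("a portrait photo", num_inference_steps=30).images[0]
image.save("sample.png")
```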
524
[ [ -0.01947021484375, -0.046661376953125, 0.0077667236328125, 0.0270538330078125, -0.0299072265625, -0.01242828369140625, 0.0241851806640625, -0.01305389404296875, 0.021240234375, 0.060760498046875, -0.060455322265625, -0.041534423828125, -0.047637939453125, -0...
zaidazhari/ppo-LunarLander-v2
2023-07-19T10:56:38.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
zaidazhari
null
null
zaidazhari/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T10:56:17
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 263.19 +/- 8.90 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="zaidazhari/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
783
[ [ -0.00020432472229003906, -0.027130126953125, 0.0170745849609375, 0.0233612060546875, -0.0060577392578125, 0.0027675628662109375, 0.034454345703125, -0.0121307373046875, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578125...
zampoan/ppo-LunarLander-v2
2023-07-19T12:33:54.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
zampoan
null
null
zampoan/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T12:33:01
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 168.10 +/- 17.44 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="zampoan/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
bspies/ppo-LunarLander-v2
2023-07-19T13:46:40.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
bspies
null
null
bspies/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T13:46:19
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 260.54 +/- 20.81 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="bspies/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
StKirill/ppo-LunarLander-v2
2023-07-19T13:48:30.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
StKirill
null
null
StKirill/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T13:48:12
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 247.47 +/- 68.18 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="StKirill/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.0001958608627319336, -0.02716064453125, 0.0170745849609375, 0.0233306884765625, -0.006061553955078125, 0.0027523040771484375, 0.034423828125, -0.01212310791015625, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578125, ...
MatteoColavita/ppo-LunarLander-v2
2023-07-19T13:50:33.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
MatteoColavita
null
null
MatteoColavita/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T13:50:10
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 259.71 +/- 16.68 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="MatteoColavita/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.0001958608627319336, -0.02716064453125, 0.0170745849609375, 0.0233306884765625, -0.006061553955078125, 0.0027523040771484375, 0.034423828125, -0.01212310791015625, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.0343017578125, ...
giocs2017/a2c-AntBulletEnv-v0
2023-07-19T14:07:55.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
giocs2017
null
null
giocs2017/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-19T14:06:49
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1658.96 +/- 224.56 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="giocs2017/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip") model = A2C.load(checkpoint) ```
791
[ [ -0.0267791748046875, -0.044403076171875, 0.01070404052734375, 0.0208740234375, -0.003498077392578125, 0.0017900466918945312, 0.0187530517578125, -0.01763916015625, 0.0193939208984375, 0.026580810546875, -0.052581787109375, -0.037506103515625, -0.04425048828125, ...
robertpassmann/ppo-LunarLander-v2
2023-07-19T14:15:21.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
robertpassmann
null
null
robertpassmann/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T14:14:58
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 258.44 +/- 11.37 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="robertpassmann/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
giocs2017/a2c-PandaReachDense-v2
2023-07-19T14:58:29.000Z
[ "stable-baselines3", "PandaReachDense-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
giocs2017
null
null
giocs2017/a2c-PandaReachDense-v2
0
2
stable-baselines3
2023-07-19T14:55:46
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.35 +/- 0.47 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="giocs2017/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip") model = A2C.load(checkpoint) ```
802
[ [ -0.019744873046875, -0.04742431640625, -0.004787445068359375, 0.0469970703125, -0.00018846988677978516, -0.006023406982421875, 0.033172607421875, -0.0249481201171875, 0.028045654296875, 0.042694091796875, -0.06256103515625, -0.0289764404296875, -0.03277587890625...
dnarqq/ppo-LunarLander-v2
2023-07-19T17:44:31.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
dnarqq
null
null
dnarqq/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T15:45:27
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 283.73 +/- 19.72 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="dnarqq/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
albagon/ppo-LunarLander-v2
2023-07-19T15:51:23.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
albagon
null
null
albagon/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T15:51:05
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 257.03 +/- 19.86 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="albagon/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
germla/satoken
2023-09-01T03:55:12.000Z
[ "sentence-transformers", "pytorch", "bert", "setfit", "text-classification", "en", "fr", "ko", "zh", "ja", "pt", "ru", "dataset:imdb", "doi:10.57967/hf/0905", "license:apache-2.0", "model-index", "region:us" ]
text-classification
germla
null
null
germla/satoken
1
2
sentence-transformers
2023-07-19T16:09:11
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification library_name: sentence-transformers metrics: - accuracy - f1 - precision - recall language: - en - fr - ko - zh - ja - pt - ru datasets: - imdb model-index: - name: germla/satoken results: - task: type: text-classification name: sentiment-analysis dataset: type: imdb name: imdb split: test metrics: - type: accuracy value: 73.976 name: Accuracy - type: f1 value: 73.1667079105832 name: F1 - type: precision value: 75.51506895964584 name: Precision - type: recall value: 70.96 name: Recall - task: type: text-classification name: sentiment-analysis dataset: type: sepidmnorozy/Russian_sentiment name: sepidmnorozy/Russian_sentiment split: train metrics: - type: accuracy value: 75.66371681415929 name: Accuracy - type: f1 value: 83.64218714253031 name: F1 - type: precision value: 75.25730753396459 name: Precision - type: recall value: 94.129763130793 name: Recall --- # Satoken This is a [SetFit model](https://github.com/huggingface/setfit) trained on multilingual datasets (listed below) for sentiment classification. The model was trained with an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head on features from the fine-tuned Sentence Transformer. It is used by [Germla](https://github.com/germla) for its feedback-analysis tool (specifically the sentiment-analysis feature). For other, language-specific models, check [here](https://github.com/germla/satoken#available-models) # Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("germla/satoken") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` # Training Details ## Training Data - [IMDB](https://huggingface.co/datasets/imdb) - [RuReviews](https://github.com/sismetanin/rureviews) - [chABSA](https://github.com/chakki-works/chABSA-dataset) - [Glyph](https://github.com/zhangxiangxiao/glyph) - [nsmc](https://github.com/e9t/nsmc) - [Allocine](https://huggingface.co/datasets/allocine) - [Portuguese Tweets for Sentiment Analysis](https://www.kaggle.com/datasets/augustop/portuguese-tweets-for-sentiment-analysis) ## Training Procedure We made sure to have a balanced dataset. The model was trained on only 35% (50% for Chinese) of the train split of each dataset (a minimal training sketch follows this card). ### Preprocessing - Basic cleaning (removal of duplicates, links, mentions, hashtags, etc.) - Removal of stopwords using [nltk](https://www.nltk.org/) ### Speeds, Sizes, Times The training procedure took 6 hours on an NVIDIA T4 GPU. ## Evaluation ### Testing Data, Factors & Metrics - [IMDB test split](https://huggingface.co/datasets/imdb) # Environmental Impact - Hardware Type: NVIDIA T4 GPU - Hours used: 6 - Cloud Provider: Amazon Web Services - Compute Region: ap-south-1 (Mumbai) - Carbon Emitted: 0.39 [kg co2 eq.](https://mlco2.github.io/impact/#co2eq)
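The training sketch referenced in the card above, using the SetFitTrainer API current at the card's date; the base Sentence Transformer checkpoint, sample counts, and loss are assumptions (the card does not name them):

```python
from datasets import load_dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, SetFitTrainer

# Few-shot subset of one of the listed datasets (sizes are illustrative)
imdb = load_dataset("imdb")
train_ds = imdb["train"].shuffle(seed=42).select(range(64))

model = SetFitModel.from_pretrained(
    "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"  # assumed base
)
trainer = SetFitTrainer(
    model=model,
    train_dataset=train_ds,
    eval_dataset=imdb["test"].select(range(256)),
    loss_class=CosineSimilarityLoss,  # contrastive fine-tuning objective
    batch_size=16,
    num_iterations=20,                # contrastive pairs generated per example
)
trainer.train()
print(trainer.evaluate())
```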
3,699
[ [ -0.031585693359375, -0.049285888671875, 0.00943756103515625, 0.01947021484375, -0.031707763671875, -0.00592041015625, -0.019012451171875, -0.022918701171875, 0.028533935546875, 0.0236663818359375, -0.06365966796875, -0.0406494140625, -0.041046142578125, 0.01...
Junr-syl/tweet_sentiments_analysis_distilbert-base-uncased
2023-07-19T17:12:04.000Z
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
Junr-syl
null
null
Junr-syl/tweet_sentiments_analysis_distilbert-base-uncased
0
2
transformers
2023-07-19T16:21:34
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_trainer metrics: - f1 model-index: - name: tweet_sentiments_analysis_distilbert-base-uncased results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tweet_sentiments_analysis_distilbert-base-uncased This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.1715 - F1: 0.7180 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.7542 | 1.0 | 1000 | 0.7429 | 0.6630 | | 0.7264 | 2.0 | 2000 | 0.7214 | 0.6782 | | 0.6376 | 3.0 | 3000 | 0.6610 | 0.7171 | | 0.5196 | 4.0 | 4000 | 0.7578 | 0.7291 | | 0.4344 | 5.0 | 5000 | 0.8670 | 0.7248 | | 0.3342 | 6.0 | 6000 | 1.0522 | 0.7223 | | 0.2841 | 7.0 | 7000 | 1.1715 | 0.7180 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
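A minimal inference sketch for the checkpoint above; the card does not document the label names, so the output may use generic `LABEL_0`/`LABEL_1`/`LABEL_2` indices:

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Junr-syl/tweet_sentiments_analysis_distilbert-base-uncased",
)
print(classifier("I love this new phone!"))
```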
1,780
[ [ -0.034149169921875, -0.042266845703125, 0.01448822021484375, 0.0176849365234375, -0.0300750732421875, -0.0002655982971191406, -0.00907135009765625, 0.007965087890625, 0.012908935546875, 0.01540374755859375, -0.05633544921875, -0.062744140625, -0.061614990234375,...
piotrsuder/ppo-LunarLander-v2
2023-07-19T16:22:04.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
piotrsuder
null
null
piotrsuder/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T16:21:36
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 178.94 +/- 67.66 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="piotrsuder/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
chandan9t8/a2c-AntBulletEnv-v0
2023-07-19T16:45:56.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
chandan9t8
null
null
chandan9t8/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-19T16:44:48
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1571.33 +/- 34.35 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="chandan9t8/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip") model = A2C.load(checkpoint) ```
790
[ [ -0.02679443359375, -0.04443359375, 0.0106964111328125, 0.0208892822265625, -0.0034961700439453125, 0.0018033981323242188, 0.0187530517578125, -0.0176544189453125, 0.0193939208984375, 0.0265655517578125, -0.052642822265625, -0.037506103515625, -0.04425048828125, ...
Trelis/mpt-7b-8k-chat-sharded-bf16
2023-07-25T16:34:38.000Z
[ "transformers", "pytorch", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "sharded", "custom_code", "dataset:camel-ai/code", "dataset:ehartford/wizard_vicuna_70k_unfiltered", "dataset:anon8231489123/ShareGPT_Vicuna_unfiltered", "dataset:teknium1/GPTeacher/roleplay-instruct-...
text-generation
Trelis
null
null
Trelis/mpt-7b-8k-chat-sharded-bf16
1
2
transformers
2023-07-19T18:09:15
--- license: cc-by-nc-sa-4.0 datasets: - camel-ai/code - ehartford/wizard_vicuna_70k_unfiltered - anon8231489123/ShareGPT_Vicuna_unfiltered - teknium1/GPTeacher/roleplay-instruct-v2-final - teknium1/GPTeacher/codegen-isntruct - timdettmers/openassistant-guanaco - camel-ai/math - project-baize/baize-chatbot/medical_chat_data - project-baize/baize-chatbot/quora_chat_data - project-baize/baize-chatbot/stackoverflow_chat_data - camel-ai/biology - camel-ai/chemistry - camel-ai/ai_society - jondurbin/airoboros-gpt4-1.2 - LongConversations - camel-ai/physics tags: - Composer - MosaicML - llm-foundry - sharded - mpt inference: false --- # MPT-7B-Chat-8k-sharded-bf16 ## Sharded version of mpt-7b-8k-chat from MosaicML Sharded using a Google Colab notebook: https://colab.research.google.com/drive/1f1q9qc56wzB_7-bjgNyLlO6f28ui1esQ?usp=sharing Information below is copy-pasted from MosaicML. # MPT-7B-Chat-8k MPT-7B-Chat-8k is a chatbot-like model for dialogue generation. It was built by finetuning [MPT-7B-8k](https://huggingface.co/mosaicml/mpt-7b-8k) on the [ShareGPT-Vicuna](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered), [Camel-AI](https://huggingface.co/camel-ai), [GPTeacher](https://github.com/teknium1/GPTeacher), [Guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Baize](https://github.com/project-baize/baize-chatbot) and some generated datasets. This is the same dataset that [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat) was trained on. * License: _CC-By-NC-SA-4.0_ (non-commercial use only) This model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture. ## Model Date July 18, 2023 ## Model License _CC-By-NC-SA-4.0_ (non-commercial use only) ## Documentation * [Blog post: MPT-7B-8k](https://www.mosaicml.com/blog/long-context-mpt-7b-8k) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-chat-8k', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-chat-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' # change this to use triton-based FlashAttention config.init_device = 'cuda:0' # For fast initialization directly on GPU! 
model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` The model was trained initially with a sequence length of 2048, with an additional pretraining stage for sequence length adaptation up to 8192. However, ALiBi enables users to increase the maximum sequence length even further during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-7b-chat-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 16384 # (input + output) tokens can now be up to 16384 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the MPT-7B-chat tokenizer, which is based on the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer and includes additional ChatML tokens. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('mosaicml/mpt-7b-8k') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). ```python from transformers import pipeline with torch.autocast('cuda', dtype=torch.bfloat16): inputs = tokenizer('Here is a recipe for vegan banana bread:\n', return_tensors="pt").to('cuda') outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # or using the HF pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| | n_parameters | 6.7B | | n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## Data Mix The model was trained on the following data mix: | Data Source | Number of Tokens in Source | Proportion | |-------------|----------------------------|------------| | Airoboros/GPT4-1.2 | 26.4M | 1.71% | | Baize | 55.0M | 3.57% | | Camel | 301M | 19.54% | | GPTeacher | 7.56M | 0.49% | | Guanaco | 15.6M | 1.02% | | LongConversations | 18.4M | 1.19% | | ShareGPT | 821M | 53.24% | | WizardLM | 297M | 19.23% | "LongConversations" is a GPT3.5/4-generated dataset, details of which will be released at a later date. ### Training Configuration This model was trained on 192 H100s for about 48 minutes using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the AdamW optimizer.
## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-Chat-8k can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B-Chat-8k was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## Acknowledgements This model was finetuned by the MosaicML NLP team ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://www.mosaicml.com/get-started?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b-8k). ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-30B: Raising the bar for open-source foundation models}, year = {2023}, url = {www.mosaicml.com/blog/mpt-30b}, note = {Accessed: 2023-06-22}, urldate = {2023-06-22} } ```
8,232
[ [ -0.0347900390625, -0.04510498046875, 0.01039886474609375, 0.03204345703125, -0.0206756591796875, -0.0015325546264648438, -0.00975799560546875, -0.025360107421875, 0.004062652587890625, 0.0252532958984375, -0.043853759765625, -0.041595458984375, -0.05291748046875...
Darisian/ppo-LunarLander-custom
2023-07-20T13:36:22.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Darisian
null
null
Darisian/ppo-LunarLander-custom
0
2
stable-baselines3
2023-07-19T18:31:43
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 95.89 +/- 105.68 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="Darisian/ppo-LunarLander-custom", filename="ppo-LunarLander-custom.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
PlankyxD/ppo-Pyramids
2023-07-19T18:37:16.000Z
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
PlankyxD
null
null
PlankyxD/ppo-Pyramids
0
2
ml-agents
2023-07-19T18:37:11
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity 2. Find your model_id: PlankyxD/ppo-Pyramids 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
1,333
[ [ -0.040679931640625, -0.034088134765625, 0.0018854141235351562, 0.014495849609375, -0.01120758056640625, 0.01222991943359375, 0.017364501953125, -0.0146331787109375, 0.033721923828125, 0.03009033203125, -0.03985595703125, -0.05072021484375, -0.0292510986328125, ...
Jelliott/ppo-LunarLander-v2
2023-07-19T19:37:20.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Jelliott
null
null
Jelliott/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-19T19:23:13
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 271.52 +/- 19.67 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="Jelliott/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
akdeniz27/ppo-Pyramids
2023-07-19T22:18:47.000Z
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
akdeniz27
null
null
akdeniz27/ppo-Pyramids
0
2
ml-agents
2023-07-19T22:18:40
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity 2. Find your model_id: akdeniz27/ppo-Pyramids 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
1,334
[ [ -0.040863037109375, -0.033721923828125, 0.00213623046875, 0.0136871337890625, -0.0108489990234375, 0.01265716552734375, 0.0168609619140625, -0.014556884765625, 0.033233642578125, 0.0300140380859375, -0.0404052734375, -0.05023193359375, -0.0296630859375, -0.0...
ByteExplorer/a2c-PandaReachDense-v2
2023-07-21T00:04:32.000Z
[ "stable-baselines3", "PandaReachDense-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
ByteExplorer
null
null
ByteExplorer/a2c-PandaReachDense-v2
0
2
stable-baselines3
2023-07-19T22:40:05
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.01 +/- 0.22 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="ByteExplorer/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip") model = A2C.load(checkpoint) ```
802
[ [ -0.019744873046875, -0.04742431640625, -0.004787445068359375, 0.0469970703125, -0.00018846988677978516, -0.006023406982421875, 0.033172607421875, -0.0249481201171875, 0.028045654296875, 0.042694091796875, -0.06256103515625, -0.0289764404296875, -0.03277587890625...
snicolau/ppo-Pyramids
2023-07-19T23:45:23.000Z
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
snicolau
null
null
snicolau/ppo-Pyramids
0
2
ml-agents
2023-07-19T23:44:31
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity 2. Find your model_id: snicolau/ppo-Pyramids 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
1,333
[ [ -0.0404052734375, -0.03436279296875, 0.002323150634765625, 0.01409912109375, -0.01099395751953125, 0.01232147216796875, 0.01629638671875, -0.01416015625, 0.034332275390625, 0.0302581787109375, -0.040252685546875, -0.04986572265625, -0.0296173095703125, -0.01...
PlankyxD/a2c-AntBulletEnv-v0
2023-07-20T00:05:24.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
PlankyxD
null
null
PlankyxD/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-20T00:04:08
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1098.32 +/- 98.86 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="PlankyxD/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip") model = A2C.load(checkpoint) ```
790
[ [ -0.0267791748046875, -0.044403076171875, 0.01070404052734375, 0.0208892822265625, -0.003509521484375, 0.0018157958984375, 0.0187530517578125, -0.0176544189453125, 0.0193939208984375, 0.0265655517578125, -0.052581787109375, -0.0374755859375, -0.04425048828125, ...
brunoboat/ppo-LunarLander-v2
2023-07-20T00:43:14.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
brunoboat
null
null
brunoboat/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-20T00:42:56
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 269.80 +/- 21.33 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="brunoboat/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
marianafmedeiros/a2c-AntBulletEnv-v0
2023-07-20T12:20:04.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
marianafmedeiros
null
null
marianafmedeiros/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-20T03:03:24
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 846.46 +/- 66.62 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside this repo is an assumption): ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub # download the checkpoint from the Hub; the filename is an assumption checkpoint = load_from_hub(repo_id="marianafmedeiros/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip") model = A2C.load(checkpoint) ```
789
[ [ -0.02679443359375, -0.04443359375, 0.0106964111328125, 0.0208892822265625, -0.0034961700439453125, 0.0018033981323242188, 0.0187530517578125, -0.0176544189453125, 0.0193939208984375, 0.0265655517578125, -0.052642822265625, -0.037506103515625, -0.04425048828125, ...
kaikaikaikaikaikaikaikai/marian-finetuned-kftt-ja-to-en
2023-07-27T08:28:04.000Z
[ "transformers", "pytorch", "tensorboard", "marian", "text2text-generation", "translation", "generated_from_trainer", "dataset:kftt", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
kaikaikaikaikaikaikaikai
null
null
kaikaikaikaikaikaikaikai/marian-finetuned-kftt-ja-to-en
0
2
transformers
2023-07-20T03:04:20
--- license: apache-2.0 tags: - translation - generated_from_trainer datasets: - kftt metrics: - bleu model-index: - name: marian-finetuned-kftt-ja-to-en results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: kftt type: kftt config: en-ja split: validation args: en-ja metrics: - name: Bleu type: bleu value: 19.353560365370512 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kftt-ja-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ja-en](https://huggingface.co/Helsinki-NLP/opus-mt-ja-en) on the kftt dataset. It achieves the following results on the evaluation set: - Loss: 1.9124 - Bleu: 19.3536 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu117 - Datasets 2.3.2 - Tokenizers 0.13.3
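The card above stops at training details, so a minimal usage sketch may help; only the repo id is taken from this record, and the example sentence is illustrative:

```python
from transformers import pipeline

# Japanese -> English translation with the fine-tuned Marian checkpoint.
translator = pipeline("translation", model="kaikaikaikaikaikaikaikai/marian-finetuned-kftt-ja-to-en")
print(translator("日本の首都は東京です。")[0]["translation_text"])
```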
1,525
[ [ -0.02984619140625, -0.05511474609375, 0.02349853515625, 0.0171051025390625, -0.043670654296875, -0.03240966796875, -0.028350830078125, -0.024627685546875, 0.01502227783203125, 0.032928466796875, -0.053985595703125, -0.035186767578125, -0.051055908203125, 0.0...
aroot/eng-kor-delfy
2023-07-24T02:37:57.000Z
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
aroot
null
null
aroot/eng-kor-delfy
0
2
transformers
2023-07-20T03:30:37
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-kor-delfy results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-kor-delfy This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0666 - Bleu: 5.9354 - Chrf: 23.5962 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1 - Datasets 2.12.0 - Tokenizers 0.11.0
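As above, the card lists only training details; a hedged usage sketch, assuming the fine-tune kept mBART-50's tokenizer and language codes (`en_XX` -> `ko_KR`):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# English -> Korean translation; language codes follow the mBART-50 convention.
tokenizer = AutoTokenizer.from_pretrained("aroot/eng-kor-delfy", src_lang="en_XX", tgt_lang="ko_KR")
model = AutoModelForSeq2SeqLM.from_pretrained("aroot/eng-kor-delfy")

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ko_KR"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```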
1,174
[ [ -0.040130615234375, -0.049774169921875, 0.0180206298828125, 0.018310546875, -0.0264892578125, -0.036865234375, -0.01611328125, -0.01363372802734375, 0.01434326171875, 0.0245819091796875, -0.059539794921875, -0.03558349609375, -0.044952392578125, -0.004779815...
teilomillet/ppo-LunarLander-v2
2023-07-25T21:15:19.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
teilomillet
null
null
teilomillet/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-20T04:00:54
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 292.03 +/- 14.79 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="teilomillet/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
kkmkorea/checkpoint25000
2023-08-07T00:25:12.000Z
[ "transformers", "pytorch", "deberta-v2", "fill-mask", "ko", "arxiv:1910.09700", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
kkmkorea
null
null
kkmkorea/checkpoint25000
0
2
transformers
2023-07-20T05:13:54
--- license: mit language: - ko metrics: - f1 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
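The template above leaves "How to Get Started" empty. A minimal fill-mask sketch, assuming the checkpoint works with the standard pipeline; the Korean example sentence is illustrative:

```python
from transformers import pipeline

# Read the mask token from the tokenizer instead of hard-coding it.
unmasker = pipeline("fill-mask", model="kkmkorea/checkpoint25000")
mask = unmasker.tokenizer.mask_token
print(unmasker(f"서울은 한국의 {mask}이다."))
```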
5,211
[ [ -0.04803466796875, -0.0455322265625, 0.032012939453125, 0.00844573974609375, -0.024383544921875, -0.0248565673828125, 0.00884246826171875, -0.047119140625, 0.018524169921875, 0.0498046875, -0.0556640625, -0.050628662109375, -0.04437255859375, -0.007740020751...
shanover/disease_classifier_base
2023-07-20T08:58:08.000Z
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "bert-base-uncased", "disease", "medical", "en", "license:mit", "endpoints_compatible", "region:us" ]
text-classification
shanover
null
null
shanover/disease_classifier_base
0
2
transformers
2023-07-20T05:32:16
--- license: mit language: - en library_name: transformers pipeline_tag: text-classification tags: - bert-base-uncased - disease - medical widget: - text: "I am having itching, skin rash, and nodal skin eruptions" example_title: "Fungal infection example" - text: "I feel like vomiting, breathlessness, and sweating" example_title: "Heart Attack example" - text: "I am feeling fatigue, weight loss, restlessness and also lethargy." example_title: "Diabetes example" --- The objective is to develop a symptom-to-disease classification model for a natural language chatbot. This model takes input text such as "I feel like vomiting, breathlessness, and sweating" and identifies the associated disease (2 - 'Heart attack'). In essence, the chatbot's purpose is to analyze users' symptoms and provide relevant disease predictions in real-time conversation. Labels: 0 - Fungal infection 1 - Diabetes 2 - Heart attack More diseases will be added in the coming days.
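A minimal usage sketch; it assumes the checkpoint ships the default `LABEL_n` names, which map to the label list above:

```python
from transformers import pipeline

# 0: Fungal infection, 1: Diabetes, 2: Heart attack (per the card's label list).
classifier = pipeline("text-classification", model="shanover/disease_classifier_base")
print(classifier("I feel like vomiting, breathlessness, and sweating"))
# Expected per the card: the heart-attack label, e.g. [{'label': 'LABEL_2', 'score': ...}]
```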
987
[ [ -0.0000330805778503418, -0.07879638671875, 0.0457763671875, 0.021240234375, -0.01971435546875, -0.003307342529296875, 0.0002040863037109375, -0.052825927734375, 0.04791259765625, 0.049163818359375, -0.0242767333984375, -0.04791259765625, -0.06402587890625, 0...
Claaas/ppo-LunarLander-v2
2023-07-20T06:11:20.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
Claaas
null
null
Claaas/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-20T06:11:05
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.69 +/- 21.30 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="Claaas/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
Claaas/ppo-Huggy
2023-07-20T06:34:45.000Z
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
Claaas
null
null
Claaas/ppo-Huggy
0
2
ml-agents
2023-07-20T06:34:43
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity 2. Find your model_id: Claaas/ppo-Huggy 3. Select your *.nn/*.onnx file 4. Click on Watch the agent play 👀
1,316
[ [ -0.0426025390625, -0.04608154296875, 0.0170440673828125, 0.0035343170166015625, -0.0160064697265625, 0.0161895751953125, 0.0135040283203125, -0.02191162109375, 0.041839599609375, 0.034271240234375, -0.048797607421875, -0.046112060546875, -0.0301666259765625, ...
lianlian123/ppo-LunarLander-v2
2023-07-20T07:50:25.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
lianlian123
null
null
lianlian123/ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-20T07:50:04
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 250.36 +/- 13.46 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="lianlian123/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00023484230041503906, -0.02716064453125, 0.017059326171875, 0.023345947265625, -0.00606536865234375, 0.002735137939453125, 0.034454345703125, -0.012115478515625, 0.019866943359375, 0.06500244140625, -0.043212890625, -0.035247802734375, -0.0343017578125, -...
ankush-003/bart-nosqli
2023-07-24T07:40:27.000Z
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
ankush-003
null
null
ankush-003/bart-nosqli
0
2
transformers
2023-07-20T10:04:16
--- license: apache-2.0 base_model: facebook/bart-large tags: - generated_from_trainer model-index: - name: bart-nosqli results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-nosqli This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.0 - Datasets 2.1.0 - Tokenizers 0.13.3
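Since the card does not document the expected input format, the following is only a shape-level sketch; the input string is a placeholder:

```python
from transformers import pipeline

# Text-to-text generation with the fine-tuned BART checkpoint.
generator = pipeline("text2text-generation", model="ankush-003/bart-nosqli")
print(generator("example input text for the model", max_new_tokens=64))
```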
1,047
[ [ -0.03558349609375, -0.0738525390625, 0.02606201171875, 0.0028400421142578125, -0.02398681640625, -0.0107421875, -0.00927734375, -0.0299530029296875, 0.0294647216796875, 0.04132080078125, -0.06475830078125, -0.044158935546875, -0.0308380126953125, -0.00350952...
bwilkie/a2c-PandaReachDense-v2
2023-07-20T16:35:18.000Z
[ "stable-baselines3", "PandaReachDense-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
bwilkie
null
null
bwilkie/a2c-PandaReachDense-v2
0
2
stable-baselines3
2023-07-20T10:51:29
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -5.73 +/- 1.19 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="bwilkie/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip") model = A2C.load(checkpoint) ```
802
[ [ -0.019744873046875, -0.0474853515625, -0.004791259765625, 0.047027587890625, -0.00016224384307861328, -0.006038665771484375, 0.033203125, -0.02496337890625, 0.028076171875, 0.042694091796875, -0.06256103515625, -0.02899169921875, -0.03277587890625, -0.006637...
budecosystem/genz-13b
2023-07-20T15:28:37.000Z
[ "transformers", "pytorch", "llama", "text-generation", "en", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
budecosystem
null
null
budecosystem/genz-13b
2
2
transformers
2023-07-20T11:15:54
--- language: - en library_name: transformers pipeline_tag: text-generation --- # GenZ 13B An instruction-finetuned model with a 4K input length, finetuned on top of the pretrained LLaMa2. ## Inference ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("budecosystem/genz-13b", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("budecosystem/genz-13b", torch_dtype=torch.bfloat16) inputs = tokenizer("The world is", return_tensors="pt") sample = model.generate(**inputs, max_length=128) print(tokenizer.decode(sample[0])) ``` Use the following prompt template (a sketch of applying it follows below): ``` A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: Hi, how are you? ASSISTANT: ``` ## Finetuning ```bash python finetune.py --model_name meta-llama/Llama-2-13b --data_path dataset.json --output_dir output --trust_remote_code --prompt_column instruction --response_column output ``` See the GitHub repository for the code -> [GenZ](https://github.com/BudEcosystem/GenZ)
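A sketch of applying the prompt template above; the exact whitespace around `USER:`/`ASSISTANT:` is an assumption:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load exactly as in the card's inference snippet.
tokenizer = AutoTokenizer.from_pretrained("budecosystem/genz-13b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("budecosystem/genz-13b", torch_dtype=torch.bfloat16)

prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions. "
    "USER: Hi, how are you? ASSISTANT:"
)
inputs = tokenizer(prompt, return_tensors="pt")
sample = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(sample[0], skip_special_tokens=True))
```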
1,160
[ [ -0.036712646484375, -0.057098388671875, 0.03277587890625, 0.0190887451171875, -0.037994384765625, 0.0056304931640625, -0.0146331787109375, -0.01525115966796875, -0.0105438232421875, 0.024688720703125, -0.067138671875, -0.042999267578125, -0.05303955078125, 0...
jaygdesai/jay-default-ppo-LunarLander-v2
2023-07-20T20:33:01.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
jaygdesai
null
null
jaygdesai/jay-default-ppo-LunarLander-v2
0
2
stable-baselines3
2023-07-20T11:17:18
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.06 +/- 22.80 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="jaygdesai/jay-default-ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.00021457672119140625, -0.0271453857421875, 0.017059326171875, 0.0233612060546875, -0.0060577392578125, 0.00274658203125, 0.034423828125, -0.0121307373046875, 0.019866943359375, 0.06500244140625, -0.043182373046875, -0.035247802734375, -0.034332275390625, ...
giovannidispoto/a2c-AntBulletEnv-v0
2023-07-20T13:25:13.000Z
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
giovannidispoto
null
null
giovannidispoto/a2c-AntBulletEnv-v0
0
2
stable-baselines3
2023-07-20T13:24:09
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 2081.25 +/- 50.17 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import A2C from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="giovannidispoto/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip") model = A2C.load(checkpoint) ```
790
[ [ -0.02679443359375, -0.044403076171875, 0.01070404052734375, 0.0208892822265625, -0.0035266876220703125, 0.001781463623046875, 0.0187530517578125, -0.0176239013671875, 0.0193939208984375, 0.0265655517578125, -0.052581787109375, -0.037506103515625, -0.044250488281...
patebel/LunarLander
2023-07-20T14:25:11.000Z
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
patebel
null
null
patebel/LunarLander
0
2
stable-baselines3
2023-07-20T13:58:32
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -70.75 +/- 91.73 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption based on the usual Hub convention: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="patebel/LunarLander", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
784
[ [ -0.0001957416534423828, -0.0271148681640625, 0.017059326171875, 0.023345947265625, -0.006061553955078125, 0.002750396728515625, 0.034454345703125, -0.01210784912109375, 0.0198516845703125, 0.06494140625, -0.04315185546875, -0.035247802734375, -0.0343017578125, ...
mstaron/wolfBERTa
2023-07-20T15:14:17.000Z
[ "transformers", "pytorch", "roberta", "fill-mask", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
mstaron
null
null
mstaron/wolfBERTa
0
2
transformers
2023-07-20T14:54:57
--- license: mit --- This is a RoBERTa model trained on source code from WolfSSL. The training code is C/C++, but inference can also be applied to code in other languages. The model can be used for unmasking as follows: ```python from transformers import pipeline unmasker = pipeline('fill-mask', model='mstaron/wolfBERTa') unmasker("Hello I'm a <mask> model.") ``` Embeddings for a downstream task can be obtained as follows: ```python # import the model via the huggingface library from transformers import AutoTokenizer, AutoModelForMaskedLM # load the tokenizer and the model for the pretrained wolfBERTa tokenizer = AutoTokenizer.from_pretrained('mstaron/wolfBERTa') # load the model model = AutoModelForMaskedLM.from_pretrained("mstaron/wolfBERTa") # import the feature extraction pipeline from transformers import pipeline # create the pipeline, which will extract the embedding vectors # the models are already pre-defined, so we do not need to train anything here features = pipeline( "feature-extraction", model=model, tokenizer=tokenizer, return_tensors=False ) # extract the features == embeddings lstFeatures = features('Class HTTP::X1') # print the first token's embedding [CLS] # which is also a good approximation of the whole sentence embedding # the same as using np.mean(lstFeatures[0], axis=0) lstFeatures[0][0] ``` To use the model for a downstream task, we still need to fine-tune it on that task; a sketch of such a setup follows below.
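A sketch of such a downstream setup; the classification task, label count, and example batch are all hypothetical:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Hypothetical: a binary classifier head on top of wolfBERTa.
tokenizer = AutoTokenizer.from_pretrained("mstaron/wolfBERTa")
model = AutoModelForSequenceClassification.from_pretrained("mstaron/wolfBERTa", num_labels=2)

# One illustrative training step; a real setup would use a Trainer and a labeled dataset.
batch = tokenizer(["int ret = wolfSSL_Init();"], padding=True, return_tensors="pt")
labels = torch.tensor([1])
loss = model(**batch, labels=labels).loss
loss.backward()
```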
1,487
[ [ -0.01641845703125, -0.040069580078125, 0.02374267578125, 0.0251312255859375, -0.03826904296875, 0.004398345947265625, -0.00433349609375, -0.00261688232421875, 0.0277557373046875, 0.0435791015625, -0.055084228515625, -0.033050537109375, -0.06451416015625, -0....
hyungtae/mpt-30b
2023-07-21T00:47:36.000Z
[ "transformers", "pytorch", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "StreamingDatasets", "custom_code", "dataset:allenai/c4", "dataset:mc4", "dataset:togethercomputer/RedPajama-Data-1T", "dataset:bigcode/the-stack-dedup", "dataset:allenai/s2orc", "arxiv:2108.12409...
text-generation
hyungtae
null
null
hyungtae/mpt-30b
0
2
transformers
2023-07-20T16:31:38
--- license: apache-2.0 tags: - Composer - MosaicML - llm-foundry - StreamingDatasets datasets: - allenai/c4 - mc4 - togethercomputer/RedPajama-Data-1T - bigcode/the-stack-dedup - allenai/s2orc inference: false --- ### Attribution This model is derived from [MosaicML's MPT-30B model](https://huggingface.co/mosaicml/mpt-30b/tree/main), with changes from [cekal/mpt-7b-peft-compatible](https://huggingface.co/cekal/mpt-7b-peft-compatible) applied; each licensed under the Apache License, version 2.0. # MPT-30B MPT-30B is a decoder-style transformer pretrained from scratch on 1T tokens of English text and code. This model was trained by [MosaicML](https://www.mosaicml.com). MPT-30B is part of the family of Mosaic Pretrained Transformer (MPT) models, which use a modified transformer architecture optimized for efficient training and inference. MPT-30B comes with special features that differentiate it from other LLMs, including an 8k token context window (which can be further extended via finetuning; see [MPT-7B-StoryWriter](https://huggingface.co/mosaicml/mpt-7b-storywriter)), support for context-length extrapolation via [ALiBi](https://arxiv.org/abs/2108.12409), and efficient inference + training via FlashAttention. It also has strong coding abilities thanks to its pretraining mix. MPT models can also be served efficiently with both standard HuggingFace pipelines and NVIDIA's [FasterTransformer](https://github.com/NVIDIA/FasterTransformer). The size of MPT-30B was also specifically chosen to make it easy to deploy on a single GPU—either 1xA100-80GB in 16-bit precision or 1xA100-40GB in 8-bit precision. This model uses the MosaicML LLM codebase, which can be found in the [llm-foundry repository](https://github.com/mosaicml/llm-foundry). It was trained by MosaicML’s NLP team on the [MosaicML platform](https://www.mosaicml.com/training) for LLM pretraining, finetuning, and inference. ### How is this model different? MPT-30B is: * **Licensed for the possibility of commercial use** (unlike [LLaMA](https://arxiv.org/abs/2302.13971)). * **Trained on a large amount of data** (1T tokens like [LLaMA](https://arxiv.org/abs/2302.13971) vs. 300B for [Pythia](https://github.com/EleutherAI/pythia), 300B for [OpenLLaMA](https://github.com/openlm-research/open_llama), and 800B for [StableLM](https://github.com/Stability-AI/StableLM)). * **Prepared to handle extremely long inputs** thanks to [ALiBi](https://arxiv.org/abs/2108.12409). * **Capable of fast training and inference** (via [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) and [FasterTransformer](https://github.com/NVIDIA/FasterTransformer)) * **Equipped with highly efficient open-source training code** via the [llm-foundry repository](https://github.com/mosaicml/llm-foundry) ### Models finetuned off MPT-30B: The following models are finetuned on MPT-30B: * [MPT-30B-Instruct](https://huggingface.co/mosaicml/mpt-30b-instruct): a model for long-form instruction following (especially summarization and question-answering). Built by finetuning MPT-30B on several carefully curated datasets. * License: _CC-BY-SA-3.0_ * [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat): a chatbot-like model for dialogue generation. 
Built by finetuning MPT-30B on [ShareGPT-Vicuna](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered), [Camel-AI](https://huggingface.co/camel-ai), [GPTeacher](https://github.com/teknium1/GPTeacher), [Guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Baize](https://github.com/project-baize/baize-chatbot) and some generated datasets. * License: _CC-By-NC-SA-4.0_ * [Demo on Hugging Face Spaces](https://huggingface.co/spaces/mosaicml/mpt-30b-chat) ## Model Date June 22, 2023 ## Model License Apache-2.0 ## Documentation * [Blog post: MPT-30B: Raising the bar for open-source foundation models](https://www.mosaicml.com/blog/mpt-30b) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-30b', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-30b' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' # change this to use triton-based FlashAttention config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` The model was trained initially with a sequence length of 2048 with an additional pretraining stage for sequence length adaptation up to 8192. However, ALiBi enables users to increase the maximum sequence length even further during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-30b' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 16384 # (input + output) tokens can now be up to 16384 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the MPT-30B tokenizer which is identical to the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('mosaicml/mpt-30b') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). 
```python from transformers import pipeline with torch.autocast('cuda', dtype=torch.bfloat16): inputs = tokenizer('Here is a recipe for vegan banana bread:\n', return_tensors="pt").to('cuda') outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # or using the HF pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 29.95B | |n_layers | 48 | | n_heads | 64 | | d_model | 7168 | | vocab size | 50432 | | sequence length | 8192 | ## Training Data ### Streaming Datasets Data was formatted using the MosaicML [StreamingDataset](https://github.com/mosaicml/streaming) library to host our data in object storage and efficiently stream it to our compute cluster during training. StreamingDataset obviates the need to download the whole dataset before starting training, and allows instant resumption of training from any point in the dataset. ### Data Mix The model was trained for 1T tokens on the following data mix: | Data Source | Number of Tokens in Source | Proportion | Effective Number of Tokens | Epochs | |-------------|----------------------------|------------|----------------------------|--------| | mC4 3.1.0 - English (200+ words) | 2417.99 B | 33.50% | 335 B | 0.14 | | c4 - English - SemDedup 80% | 100.42 B | 29.90% | 299 B | 2.98 | | RedPajama - CommonCrawl | 878.45 B | 8.50% | 85 B | 0.097 | | The Stack - Selected Languages | 463.78 B | 10.00% | 100 B | 0.22 | | RedPajama - Wikipedia | 4.87 B | 4.00% | 40 B | 8.21 | | The Stack - Markdown | 107.07 B | 4.50% | 45 B | 0.42 | | Semantic Scholar ORC | 48.95 B | 3.30% | 33 B | 0.67 | | RedPajama - Books | 26.02 B | 3.00% | 30 B | 1.15 | | RedPajama - arXiv | 28.10 B | 1.90% | 19 B | 0.68 | | RedPajama - StackExchange | 20.54 B | 1.40% | 14 B |0.68 | Samples for each batch were selected from one of the datasets with the probability specified above. The examples were shuffled within each dataset, and each example was constructed from as many sequences from that dataset as were necessary to fill the sequence length. To build 8k support into MPT-30B efficiently, we first pre-trained on 1T tokens using sequences that were 2k tokens long, and then trained for an additional 50B tokens using sequences that were 8k tokens long. The data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. This BPE tokenizer has a number of desirable characteristics, most of which are relevant for tokenizing code: (1) It was trained on a diverse mix of data that includes code (The Pile) (2) It applies consistent space delimitation, unlike the GPT2 tokenizer which tokenizes inconsistently depending on the presence of prefix spaces (3) It contains tokens for repeated space characters, which allows superior compression of text with large amounts of repeated space characters. 
The model vocabulary size of 50432 was set to be a multiple of 128 (as in [MEGATRON-LM](https://arxiv.org/abs/1909.08053)). ### Training Configuration The model was trained in three stages using the [MosaicML Platform](https://www.mosaicml.com/platform): (i) First it was trained on 440 A100-40GBs with a batch size of 1760. (ii) Then, on 216 A100-40GBs with a batch size of 1728. (iii) Training was completed on 256 H100-80GBs with a batch size of 512 with 8k context length and 50B tokens. The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the [LION](https://arxiv.org/abs/2302.06675) optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-30B (Base) is **not** intended for deployment without finetuning. It should not be used for human-facing interactions without further guardrails and user consent. MPT-30B can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-30B was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-30b). ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-30B: Raising the bar for open-source foundation models}, year = {2023}, url = {www.mosaicml.com/blog/mpt-30b}, note = {Accessed: 2023-06-22}, urldate = {2023-06-22} } ```
12,490
[ [ -0.03558349609375, -0.043792724609375, 0.0174713134765625, 0.0306854248046875, -0.01910400390625, -0.0011501312255859375, -0.00882720947265625, -0.023651123046875, -0.004299163818359375, 0.023345947265625, -0.0458984375, -0.04046630859375, -0.04498291015625, ...