modelId: string (length 4 to 81)
tags: list
pipeline_tag: string (17 classes)
config: dict
downloads: int64 (0 to 59.7M)
first_commit: timestamp[ns, tz=UTC]
card: string (length 51 to 438k)
BigSalmon/InformalToFormalLincoln18
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 262.25 +/- 17.07 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
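The usage section in the card above is left as a TODO; a minimal loading sketch is below. The repo id and checkpoint filename are assumptions and must be replaced with the actual values from the model repository.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Repo id and filename are placeholders, not the actual values for this model.
checkpoint = load_from_hub(repo_id="<owner>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the loaded agent on a fresh environment.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```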
BigSalmon/InformalToFormalLincoln21
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Access to model AlbertoBalsam/PushModelToHuggingFace is restricted and you are not in the authorized list. Visit https://huggingface.co/AlbertoBalsam/PushModelToHuggingFace to ask for access.
BigSalmon/InformalToFormalLincoln23
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 446.90 +/- 74.13 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BigSalmon/InformalToFormalLincoln24
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BigSalmon/MrLincoln11
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: hBERTv2_rte results: - task: name: Text Classification type: text-classification dataset: name: GLUE RTE type: glue config: rte split: validation args: rte metrics: - name: Accuracy type: accuracy value: 0.5487364620938628 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_rte This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2](https://huggingface.co/gokuls/bert_12_layer_model_v2) on the GLUE RTE dataset. It achieves the following results on the evaluation set: - Loss: 0.6896 - Accuracy: 0.5487 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7231 | 1.0 | 10 | 0.7175 | 0.4549 | | 0.702 | 2.0 | 20 | 0.7053 | 0.4729 | | 0.6982 | 3.0 | 30 | 0.6976 | 0.4585 | | 0.7008 | 4.0 | 40 | 0.7261 | 0.4657 | | 0.7022 | 5.0 | 50 | 0.7142 | 0.4946 | | 0.6867 | 6.0 | 60 | 0.6943 | 0.4801 | | 0.6796 | 7.0 | 70 | 0.6896 | 0.5487 | | 0.6614 | 8.0 | 80 | 0.7151 | 0.5162 | | 0.6303 | 9.0 | 90 | 0.7244 | 0.5271 | | 0.602 | 10.0 | 100 | 0.7570 | 0.4729 | | 0.5761 | 11.0 | 110 | 0.7605 | 0.5379 | | 0.5664 | 12.0 | 120 | 0.8160 | 0.5235 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
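The card above omits inference instructions. A minimal sketch for sentence-pair (entailment) inference follows; the repo id gokuls/hBERTv2_rte is an assumption inferred from the model name and base checkpoint.

```python
from transformers import pipeline

# Repo id is an assumption inferred from the card's model name.
classifier = pipeline("text-classification", model="gokuls/hBERTv2_rte")

# RTE is a sentence-pair task, so premise and hypothesis go in together.
result = classifier({"text": "A man is playing a guitar.", "text_pair": "A person is making music."})
print(result)  # e.g. [{'label': ..., 'score': ...}] per the checkpoint's label mapping
```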
BigSalmon/MrLincoln125MNeo
[ "pytorch", "tensorboard", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: hBERTv2_sst2 results: - task: name: Text Classification type: text-classification dataset: name: GLUE SST2 type: glue config: sst2 split: validation args: sst2 metrics: - name: Accuracy type: accuracy value: 0.5091743119266054 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_sst2 This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2](https://huggingface.co/gokuls/bert_12_layer_model_v2) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.6964 - Accuracy: 0.5092 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6916 | 1.0 | 264 | 0.6999 | 0.5092 | | 0.6885 | 2.0 | 528 | 0.6978 | 0.5092 | | 0.6871 | 3.0 | 792 | 0.6984 | 0.5092 | | 0.6869 | 4.0 | 1056 | 0.6990 | 0.5092 | | 0.6868 | 5.0 | 1320 | 0.6974 | 0.5092 | | 0.6869 | 6.0 | 1584 | 0.6980 | 0.5092 | | 0.6867 | 7.0 | 1848 | 0.6984 | 0.5092 | | 0.6868 | 8.0 | 2112 | 0.6975 | 0.5092 | | 0.6868 | 9.0 | 2376 | 0.6964 | 0.5092 | | 0.6865 | 10.0 | 2640 | 0.6978 | 0.5092 | | 0.6868 | 11.0 | 2904 | 0.6980 | 0.5092 | | 0.6865 | 12.0 | 3168 | 0.7001 | 0.5092 | | 0.6867 | 13.0 | 3432 | 0.6966 | 0.5092 | | 0.6867 | 14.0 | 3696 | 0.6980 | 0.5092 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
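As with the RTE card above, usage is not documented; here is an explicit tokenizer-and-model sketch for SST-2 sentiment scoring, assuming the checkpoint lives at gokuls/hBERTv2_sst2.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "gokuls/hBERTv2_sst2"  # assumption inferred from the card's model name
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("a gripping, beautifully shot film", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = torch.softmax(logits, dim=-1)
print(probs)  # probabilities over the two SST-2 classes (negative/positive)
```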
BigSalmon/MrLincoln13
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - metrics: - type: mean_reward value: -0.28 +/- 0.13 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
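Here too the usage block is a stub. A hedged sketch follows; repo id and filename are placeholders, and PandaReachDense-v2 needs the panda-gym package to register the environment.

```python
import gym
import panda_gym  # noqa: F401  # registers the PandaReachDense-v2 environment
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Repo id and filename are placeholders, not the actual values for this model.
checkpoint = load_from_hub(repo_id="<owner>/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)

env = gym.make("PandaReachDense-v2")
obs = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
```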
BigSalmon/MrLincoln14
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 49.07 +/- 67.28 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo', 'seed': 1, 'torch_deterministic': True, 'cuda': True, 'track': False, 'wandb_project_name': 'cleanRL', 'wandb_entity': None, 'capture_video': False, 'env_id': 'LunarLander-v2', 'total_timesteps': 500000, 'learning_rate': 0.00025, 'num_envs': 4, 'num_steps': 128, 'anneal_lr': True, 'gae': True, 'gamma': 0.99, 'gae_lambda': 0.95, 'num_minibatches': 4, 'update_epochs': 4, 'norm_adv': True, 'clip_coef': 0.2, 'clip_vloss': True, 'ent_coef': 0.01, 'vf_coef': 0.5, 'max_grad_norm': 0.5, 'target_kl': None, 'repo_id': 'EduardoCGarridoMerchan/ppo_v1', 'batch_size': 512, 'minibatch_size': 128} ```
BigSalmon/MrLincoln2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - generated_from_trainer model-index: - name: detr-resnet-101-CD45RB-1000-att-e4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # detr-resnet-101-CD45RB-1000-att-e4 This model is a fine-tuned version of an unspecified base model on an unspecified dataset. It achieves the following results on the evaluation set: - Loss: 4.6218 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 5.9448 | 1.0 | 94 | 4.6466 | | 4.6291 | 2.0 | 188 | 4.6218 | ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.0.dev20230107 - Datasets 2.10.0 - Tokenizers 0.13.2
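The card above provides no inference instructions; a minimal object-detection sketch with the transformers pipeline follows. The owner namespace in the repo id is a placeholder, since the card does not state it.

```python
from transformers import pipeline

# The owner namespace is a placeholder; the card does not state it.
detector = pipeline("object-detection", model="<owner>/detr-resnet-101-CD45RB-1000-att-e4")

detections = detector("cells.png")  # path to a microscopy image
for d in detections:
    print(d["label"], round(d["score"], 3), d["box"])
```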
BigSalmon/MrLincoln4
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-01T13:38:57Z
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: hBERTv1_data_aug_qnli results: - task: name: Text Classification type: text-classification dataset: name: GLUE QNLI type: glue args: qnli metrics: - name: Accuracy type: accuracy value: 0.5053999633900788 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_data_aug_qnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE QNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6931 - Accuracy: 0.5054 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.6933 | 1.0 | 16604 | 0.6931 | 0.5054 | | 0.6932 | 2.0 | 33208 | 0.6931 | 0.4946 | | 0.6932 | 3.0 | 49812 | 0.6931 | 0.5054 | | 0.6932 | 4.0 | 66416 | 0.6931 | 0.5054 | | 0.6932 | 5.0 | 83020 | 0.6931 | 0.4946 | | 0.6932 | 6.0 | 99624 | 0.6931 | 0.5054 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
BigSalmon/MrLincoln5
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- This model is a diffusion model for unconditional generation of 64x64-pixel mammogram images. It was trained on 1000 images for 50 epochs with a batch size of 64, using the [DDPM](https://arxiv.org/abs/2006.11239) architecture and around 11 GB of GPU memory. ## Usage (replace hub_model_id with this repository's id) ```python from diffusers import DDPMPipeline hub_model_id = "<owner>/<this-model>" pipeline = DDPMPipeline.from_pretrained(hub_model_id) image = pipeline().images[0] image ```
BigSalmon/MrLincoln6
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 985.53 +/- 149.51 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BigSalmon/MrLincoln7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym model = load_from_hub(repo_id="robkayinto/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
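The snippet above calls load_from_hub without defining it; in the course it is a small helper around hf_hub_download and pickle, roughly as sketched here.

```python
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning model dict from the Hugging Face Hub."""
    pickle_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(pickle_path, "rb") as f:
        return pickle.load(f)
```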
BigSalmon/MrLincolnBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: dumbassRepoName results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.70 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym model = load_from_hub(repo_id="robkayinto/dumbassRepoName", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
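Once the model dict is loaded, rolling out the greedy policy takes a few lines. This sketch assumes the dict stores its Q-table under the 'qtable' key, as the course's training script does.

```python
import numpy as np

# 'model' and 'env' come from the snippet above; 'qtable' is an assumed key.
state = env.reset()
done = False
total_reward = 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action
    state, reward, done, info = env.step(action)
    total_reward += reward
print(f"episode return: {total_reward}")
```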
BigSalmon/NEO125InformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.8765 - name: F1 type: f1 value: 0.8703000237896847 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.4371 - Accuracy: 0.8765 - F1: 0.8703 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.7612 | 1.0 | 250 | 0.4371 | 0.8765 | 0.8703 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
BigSalmon/Points
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-pixelcopter results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 48.60 +/- 48.25 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BigSalmon/Points2
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: zyoscovits/poca-SoccerTwos 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
BigSalmon/SimplifyText
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
2023-03-01T13:56:15Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: rl-course-unit-2-Taxi results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym model = load_from_hub(repo_id="fwzmyd/rl-course-unit-2-Taxi", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 232.16 +/- 29.10 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BigSalmon/T5F
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: abarekatain/PyramidTraining 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
BigeS/DialoGPT-small-Rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - ar license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper larg Ar2 - Mohammed Nasri results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: ar split: test args: 'config: ar, split: test' metrics: - name: Wer type: wer value: 22.49709073163407 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper larg Ar2 - Mohammed Nasri This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.1921 - Wer: 22.4971 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.162 | 0.21 | 1000 | 0.1921 | 22.4971 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.7.0 - Tokenizers 0.13.2
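For the Whisper fine-tune above, a hedged transcription sketch with the transformers ASR pipeline; the repo id is a placeholder because the card does not state where the checkpoint is hosted.

```python
from transformers import pipeline

# Repo id is a placeholder; substitute the actual fine-tuned checkpoint.
asr = pipeline("automatic-speech-recognition", model="<owner>/whisper-large-ar2")

result = asr("sample_arabic.wav")  # path to an audio file
print(result["text"])
```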
Bimal/my_bot_model
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: hBERTv1_cola results: - task: name: Text Classification type: text-classification dataset: name: GLUE COLA type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_cola This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE COLA dataset. It achieves the following results on the evaluation set: - Loss: 0.6180 - Matthews Correlation: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.6212 | 1.0 | 34 | 0.6180 | 0.0 | | 0.6118 | 2.0 | 68 | 0.6211 | 0.0 | | 0.6088 | 3.0 | 102 | 0.6252 | 0.0 | | 0.6103 | 4.0 | 136 | 0.6182 | 0.0 | | 0.6107 | 5.0 | 170 | 0.6210 | 0.0 | | 0.6081 | 6.0 | 204 | 0.6212 | 0.0 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
BinksSachary/ShaxxBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en pipeline_tag: text-to-image tags: - code --- Following a certain event/controversy, this repository offers high-quality models, VAEs, and modules for everyone.
BinksSachary/ShaxxBot2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -44.41 +/- 19.85 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo', 'seed': 1, 'torch_deterministic': True, 'cuda': True, 'track': False, 'wandb_project_name': 'cleanRL', 'wandb_entity': None, 'capture_video': False, 'env_id': 'LunarLander-v2', 'total_timesteps': 3000000, 'learning_rate': 2.5e-05, 'num_envs': 4, 'num_steps': 128, 'anneal_lr': True, 'gae': True, 'gamma': 0.999, 'gae_lambda': 0.95, 'num_minibatches': 4, 'update_epochs': 4, 'norm_adv': True, 'clip_coef': 0.2, 'clip_vloss': True, 'ent_coef': 0.01, 'vf_coef': 0.5, 'max_grad_norm': 0.5, 'target_kl': None, 'repo_id': 'EduardoCGarridoMerchan/TobiLander', 'batch_size': 512, 'minibatch_size': 128} ```
BitanBiswas/mbert-bengali-ner-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: hBERTv1_mrpc results: - task: name: Text Classification type: text-classification dataset: name: GLUE MRPC type: glue config: mrpc split: validation args: mrpc metrics: - name: Accuracy type: accuracy value: 0.6862745098039216 - name: F1 type: f1 value: 0.7999999999999999 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_mrpc This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE MRPC dataset. It achieves the following results on the evaluation set: - Loss: 0.6051 - Accuracy: 0.6863 - F1: 0.8000 - Combined Score: 0.7431 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:| | 0.6536 | 1.0 | 15 | 0.6243 | 0.6838 | 0.8122 | 0.7480 | | 0.6275 | 2.0 | 30 | 0.6174 | 0.7010 | 0.8117 | 0.7564 | | 0.6129 | 3.0 | 45 | 0.6089 | 0.6961 | 0.8182 | 0.7571 | | 0.6087 | 4.0 | 60 | 0.6062 | 0.6887 | 0.8130 | 0.7508 | | 0.5939 | 5.0 | 75 | 0.6104 | 0.6863 | 0.7935 | 0.7399 | | 0.5707 | 6.0 | 90 | 0.6184 | 0.7083 | 0.8183 | 0.7633 | | 0.5426 | 7.0 | 105 | 0.6051 | 0.6863 | 0.8000 | 0.7431 | | 0.4819 | 8.0 | 120 | 0.6560 | 0.6936 | 0.8019 | 0.7478 | | 0.4279 | 9.0 | 135 | 0.6673 | 0.6887 | 0.7678 | 0.7283 | | 0.3374 | 10.0 | 150 | 0.8092 | 0.6863 | 0.7902 | 0.7382 | | 0.2789 | 11.0 | 165 | 0.9342 | 0.6887 | 0.7935 | 0.7411 | | 0.2216 | 12.0 | 180 | 0.9708 | 0.6838 | 0.7810 | 0.7324 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
Blaine-Mason/hackMIT-finetuned-sst2
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - spearmanr model-index: - name: hBERTv2_stsb results: - task: name: Text Classification type: text-classification dataset: name: GLUE STSB type: glue config: stsb split: validation args: stsb metrics: - name: Spearmanr type: spearmanr value: 0.7706783096515127 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_stsb This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2](https://huggingface.co/gokuls/bert_12_layer_model_v2) on the GLUE STSB dataset. It achieves the following results on the evaluation set: - Loss: 0.9534 - Pearson: 0.7722 - Spearmanr: 0.7707 - Combined Score: 0.7714 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:--------------:| | 4.4386 | 1.0 | 23 | 2.5331 | 0.1313 | 0.1071 | 0.1192 | | 1.8741 | 2.0 | 46 | 2.0517 | 0.4923 | 0.4766 | 0.4844 | | 1.347 | 3.0 | 69 | 1.3556 | 0.6964 | 0.7079 | 0.7022 | | 0.8443 | 4.0 | 92 | 1.2583 | 0.7340 | 0.7367 | 0.7353 | | 0.5822 | 5.0 | 115 | 0.9534 | 0.7722 | 0.7707 | 0.7714 | | 0.4356 | 6.0 | 138 | 1.1921 | 0.7798 | 0.7771 | 0.7785 | | 0.3531 | 7.0 | 161 | 1.3849 | 0.7701 | 0.7700 | 0.7700 | | 0.2712 | 8.0 | 184 | 1.0015 | 0.7886 | 0.7870 | 0.7878 | | 0.259 | 9.0 | 207 | 1.0523 | 0.7898 | 0.7874 | 0.7886 | | 0.2003 | 10.0 | 230 | 1.1525 | 0.7836 | 0.7824 | 0.7830 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
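STS-B is a regression task (similarity score 0 to 5), so the single output logit is read directly; the repo id gokuls/hBERTv2_stsb is an assumption inferred from the card's model name.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "gokuls/hBERTv2_stsb"  # assumption inferred from the card's model name
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("A plane is taking off.", "An air plane is taking off.", return_tensors="pt")
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()  # similarity on a 0-5 scale
print(score)
```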
Blerrrry/Kkk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: whisper_small_arabic_cv11_no_diacs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper_small_arabic_cv11_no_diacs This model is a fine-tuned version of [Seifaber/whisper-small](https://huggingface.co/Seifaber/whisper-small) on an unknown dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2384 - eval_wer: 21.8571 - eval_runtime: 3557.9167 - eval_samples_per_second: 2.934 - eval_steps_per_second: 0.367 - epoch: 1.66 - step: 2000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 3000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
BlightZz/MakiseKurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 269.98 +/- 16.44 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BlindMan820/Sarcastic-News-Headlines
[ "pytorch", "distilbert", "text-classification", "English", "dataset:Kaggle Dataset", "transformers", "Text", "Sequence-Classification", "Sarcasm", "DistilBert" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: cls_asr_trial_sd_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cls_asr_trial_sd_1 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.10.2.dev0 - Tokenizers 0.13.2
BlueGamerBeast/DialoGPT-small-joshua
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: mikegarts/poca-SoccerTwos-v2.2 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
Bman/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: mikegarts/poca-SoccerTwos-v3 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
BobBraico/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: gpl-3.0 language: - en pipeline_tag: image-segmentation --- # 3DL_NuCount model __Model author__: Fabrice Daian #### Model description The __3DL_NuCount__ model was built by fine-tuning a pretrained StarDist3D model [1,2] on a homemade dataset [3] in order to count the cells present in a given 3D image stack acquired with an optical microscope. Training and inference notebooks are hosted on our GitHub repo [4]. #### StarDist training parameters - patch size: (48,96,96) - batch size: 32 - epochs: 100 - data augmentation: flip/rotation/intensity - image normalization: normalize channels independently - anisotropy: empirical - rays: 96 #### Training dataset parameters - tile size: (4,63,128,128) - split: Train 0.8 / Val 0.2 #### Inference - patch size: (784,784,:) - image size: (2048,2048,:) - model name: __weights_best_1.h5__ - config file: __config.json__ - threshold file: __thresholds.json__ #### References - [1] StarDist project: [GitHub](https://github.com/stardist/stardist) - [2] StarDist paper: [arXiv](https://arxiv.org/abs/1908.03636) - [3] NuCount training dataset: [Zenodo](https://) - [4] NuCount GitHub project: [GitHub](https://github.com/andysaurin/3DL_NuCount/)
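Since the card points to external notebooks for inference, here is a minimal counting sketch under stated assumptions: the model directory follows StarDist's usual `basedir/name` layout containing the listed weights, config, and threshold files, and the input stack is a single-channel TIFF.

```python
# Hedged sketch: nuclei counting with the fine-tuned StarDist3D model.
# Assumes the model files sit in models/3DL_NuCount/ (layout not stated in the card).
from csbdeep.utils import normalize
from stardist.models import StarDist3D
from tifffile import imread

model = StarDist3D(None, name="3DL_NuCount", basedir="models")
img = imread("stack.tif")            # (Z, Y, X) volume
img = normalize(img, 1, 99.8)        # percentile normalization, as in training
labels, details = model.predict_instances(img)
print("cell count:", labels.max())   # instance labels run 1..N
```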
Broadus20/DialoGPT-small-joshua
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-03-01T15:20:39Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget library_name: ml-agents --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Write your model_id: CloXD/ppo-SnowballTarget 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
855
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 535.00 +/- 194.35 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ManarAli -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ManarAli -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga ManarAli ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
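Outside the zoo, the hyperparameter table above maps directly onto SB3's `DQN` constructor. A hedged sketch follows; it assumes `make_atari_env` plus `VecFrameStack` reproduces the zoo's `AtariWrapper` + `frame_stack: 4` preprocessing.

```python
# Hedged sketch: the card's hyperparameters as a plain SB3 training script.
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)
model = DQN(
    "CnnPolicy",
    env,
    buffer_size=100_000,
    learning_rate=1e-4,
    batch_size=32,
    learning_starts=100_000,
    target_update_interval=1_000,
    train_freq=4,
    gradient_steps=1,
    exploration_fraction=0.1,
    exploration_final_eps=0.01,
    optimize_memory_usage=False,
)
model.learn(total_timesteps=1_000_000)
model.save("dqn_space_invaders")
```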
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: creativeml-openrail-m tags: - anime - art - stable diffusion --- --- <p align="center"><img src="https://media.vgm.io/products/86/10068/10068-1615144255.png"> --- Welcome to my Date a Live LoRA page! ## Table of Contents - [Kotori](#kotori) - [Origami](#origami) - [Natsumi](#natsumi) - [Yoshino](#yoshino) - [Kurumi](#kurumi) Civitai Kotori --> [Click here!](https://civitai.com/models/15838/itsuka-kotori-or-date-a-live) Civitai Origami --> [Click here!](https://civitai.com/models/11520/shiro-lora-or-no-game-no-life) Civitai Natsumi --> [Click here!](https://civitai.com/models/11520/shiro-lora-or-no-game-no-life) Civitai Yoshino --> [Click here!](https://civitai.com/models/11520/shiro-lora-or-no-game-no-life) Civitai Kurumi --> [Click here!](https://civitai.com/models/11520/shiro-lora-or-no-game-no-life) --- # Kotori Download: [Click Here](https://huggingface.co/Maisman/Date-a-live/blob/main/KotoriDateALiveLora.safetensors) How to use: Put (black background, white background) in the negative prompt; otherwise most images will default to a black or white background. Sad part: generating images with lollipops is hard, and none of my results were good enough. I will try to improve this someday. This LoRA is very flexible about her outfit, but you have to describe the complete outfit to match her anime style. See the example images below. Trigger words: ``` itsuka kotori, date a live, school uniform, miniskirt, black ribbon, long hair, red hair, twintails, boots, red eyes, looking at viewer, standing, red jacket, white shirt ``` ``` lollipop, holding lollipop, candy, ``` ``` astral dress, spirit form, spirit, fire, axe, horns, ``` I got better results with her normal outfit/school uniform. Her spirit form mostly renders a deformed weapon, but the images are still not bad. More images on Civitai: ▼ Example Images ▼ <p align="center"><img src="https://huggingface.co/Maisman/Date-a-live/resolve/main/02372-365175546-%2C%20masterpiece%2C%20highres%2C%20high%20quality%2C%20extremly%20detailed%2C%20school%20uniform%2C%20twintails%2C%20hands%20behind%20back%2C%20(white%20ribbon)%2C%20red%20hair%2C.png"> ``` <lora:KotoriDateALiveLora:0.7>, masterpiece, highres, high quality, extremly detailed, school uniform, twintails, hands behind back, (white ribbon), red hair, standing on street, (dal style), (aime), miniskirt, ``` Negative prompt: ``` (worst quality, low quality:1.4), (lip, nose, tooth, rouge, lipstick, eyeshadow:1.4), ( jpeg artifacts:1.4), (depth of field, bokeh, blurry, film grain, chromatic aberration, lens flare:1.0), (1boy, abs, muscular, rib:1.0), greyscale, monochrome, dusty sunbeams, trembling, motion lines, motion blur, emphasis lines, text, title, logo, signature, school uniform, (By bad artist -neg), (easynegative), (black background, white background), ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 365175546, Size: 512x768, Model: abyssorangemix3 --- <p align="center"><img src="https://huggingface.co/Maisman/Date-a-live/resolve/main/02353-1108186154-%2C%20masterpiece%2C%20highres%2C%20high%20quality%2C%20extremly%20detailed%2C%20red%20miniskirt%2C%20black%20ribbon%2C%20white%20shirt%2C%20black%20thighhighs%2C%20twintails%2C.png"> ``` <lora:KotoriDateALiveLora:0.7>, masterpiece, highres, high quality, extremly detailed, red miniskirt, black ribbon, white shirt, black thighhighs, twintails, ``` Negative prompt: ``` (worst quality, low quality:1.4), (lip, nose, tooth, rouge, lipstick, eyeshadow:1.4), ( jpeg artifacts:1.4), (depth of field, bokeh, blurry, film grain, chromatic aberration, lens
flare:1.0), (1boy, abs, muscular, rib:1.0), greyscale, monochrome, dusty sunbeams, trembling, motion lines, motion blur, emphasis lines, text, title, logo, signature, school uniform, (By bad artist -neg), (easynegative), ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1108186154, Size: 512x768, Model: abyssorangemix3 --- <p align="center"><img src="https://huggingface.co/Maisman/Date-a-live/resolve/main/02498-99036261-%2C%20itsuka%20kotori%2C%20(masterpiece_1.2)%2C%20highres%2C%20high%20quality%2C%20extremly%20detailed%2C%20hands%20up%2C%20(horns)%2C%20red%20hair%2C%20standing%20in%20city%2C%20cit.png"> ``` <lora:KotoriDateALiveLora:0.8>, itsuka kotori, (masterpiece:1.2), highres, high quality, extremly detailed, hands up, (horns), red hair, standing in city, city on fire, burning house in background, (fire), ((astral dress:1.3), spirit form), spirit, (white horns:1.4), <lora:crazyExpressionsLora:0.2>, (screaming), (angry face), red eyes, ``` Negative prompt: ``` (worst quality, low quality:1.4), (lip, nose, tooth, rouge, lipstick, eyeshadow:1.4), ( jpeg artifacts:1.4), (depth of field, bokeh, blurry, film grain, chromatic aberration, lens flare:1.0), (1boy, abs, muscular, rib:1.0), greyscale, monochrome, dusty sunbeams, trembling, motion lines, motion blur, emphasis lines, text, title, logo, signature, school uniform, (By bad artist -neg), (easynegative), (black background, white background), (three legs), ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 99036261, Size: 512x768, Model: abyssorangemix3 Denoising strength: 0.6, Hires upscale: 2, Hires steps: 50, Hires upscaler: Latent (nearest-exact) --- <p align="center"><img src="https://huggingface.co/Maisman/Date-a-live/resolve/main/02527-3016526920-%2C%20itsuka%20kotori%2C%20((masterpiece%2C%20best%20quality_1.2))%2C%20highres%2C%20high%20quality%2C%20extremly%20detailed%2C%20hands%20up%2C%20red%20hair%2C%20red%20eyes%2C%20(sch.png"> ``` <lora:KotoriDateALiveLora:0.8>, itsuka kotori, ((masterpiece, best quality:1.2)), highres, high quality, extremly detailed, hands up, red hair, red eyes, (school uniform), (white ribbon), standing in city, (holding lollipop), ``` Negative prompt: ``` (worst quality, low quality:1.4), (lip, nose, tooth, rouge, lipstick, eyeshadow:1.4), ( jpeg artifacts:1.4), (depth of field, bokeh, blurry, film grain, chromatic aberration, lens flare:1.0), (1boy, abs, muscular, rib:1.0), greyscale, monochrome, dusty sunbeams, trembling, motion lines, motion blur, emphasis lines, text, title, logo, signature, school uniform, (By bad artist -neg), (easynegative), (black background, white background), (three legs), ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3016526920, Size: 512x768, Model: abyssorangemix3 --- # Origami How to use: ▼ Example Images ▼ --- # Natsumi How to use: ▼ Example Images ▼ --- # Yoshino How to use: ▼ Example Images ▼ --- # Kurumi How to use: ▼ Example Images ▼
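The prompts above use webui-style `<lora:...>` syntax. For `diffusers` users, a hedged loading sketch: it assumes a recent `diffusers` with `load_lora_weights()`, and the base checkpoint id below is a stand-in for the AbyssOrangeMix3 model named in the generation settings.

```python
# Hedged sketch: using the Kotori LoRA from diffusers instead of a webui.
# The base model id is a stand-in; swap in an AbyssOrangeMix3 checkpoint to
# match the card's settings. Weighted (tag:1.4) syntax is not parsed here.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("Maisman/Date-a-live", weight_name="KotoriDateALiveLora.safetensors")

image = pipe(
    "itsuka kotori, date a live, school uniform, red hair, twintails, red eyes",
    negative_prompt="worst quality, low quality, black background, white background",
    num_inference_steps=20,
    guidance_scale=7.0,
).images[0]
image.save("kotori.png")
```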
Camzure/MaamiBot-test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - sv license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: Whisper Small Sv - Riksdag 100h results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Sv - Riksdag 100h This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on an unspecified dataset. It achieves the following results on the evaluation set: - Loss: 0.4977 - Wer: 1118.4718 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 20000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:---------:| | 0.1384 | 0.11 | 1000 | 0.4747 | 380.8335 | | 0.1186 | 0.22 | 2000 | 0.4513 | 1032.3900 | | 0.1056 | 0.33 | 3000 | 0.4385 | 582.0427 | | 0.0824 | 0.43 | 4000 | 0.4465 | 574.8907 | | 0.0961 | 0.54 | 5000 | 0.4199 | 1004.9138 | | 0.0939 | 0.65 | 6000 | 0.4478 | 866.2979 | | 0.0758 | 0.76 | 7000 | 0.4384 | 907.9496 | | 0.0741 | 0.87 | 8000 | 0.4264 | 641.1371 | | 0.0692 | 0.98 | 9000 | 0.4206 | 1142.6550 | | 0.0257 | 1.08 | 10000 | 0.4707 | 1152.4312 | | 0.0273 | 1.19 | 11000 | 0.4789 | 1100.2058 | | 0.021 | 1.3 | 12000 | 0.4763 | 1236.1719 | | 0.0163 | 1.41 | 13000 | 0.5035 | 924.8006 | | 0.0183 | 1.52 | 14000 | 0.4911 | 1285.1814 | | 0.024 | 1.63 | 15000 | 0.4861 | 1140.8284 | | 0.0158 | 1.73 | 16000 | 0.4793 | 1181.7597 | | 0.0167 | 1.84 | 17000 | 0.4759 | 1207.3064 | | 0.0231 | 1.95 | 18000 | 0.4801 | 1139.6964 | | 0.0054 | 2.06 | 19000 | 0.4934 | 1114.4842 | | 0.006 | 2.17 | 20000 | 0.4977 | 1118.4718 | ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1 - Datasets 2.10.1 - Tokenizers 0.13.2
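The card does not include an inference example, so here is a minimal transcription sketch; the repo id below is hypothetical, since the card does not state where the checkpoint is published.

```python
# Hedged sketch: Swedish transcription with the fine-tuned Whisper checkpoint.
# "your-org/whisper-small-sv-riksdag" is a hypothetical repo id.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="your-org/whisper-small-sv-riksdag",
    chunk_length_s=30,  # chunk long recordings into Whisper's 30 s windows
)
print(asr("riksdag_clip.wav")["text"])
```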
Canadiancaleb/DialoGPT-small-walter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch def cls_pooling(model_output, attention_mask): return model_output[0][:,0] # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1369 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1369, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
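Because the model was trained with `MultipleNegativesRankingLoss` using `cos_sim`, a natural follow-up to the usage snippets above is scoring sentence pairs; a short sketch, keeping the card's unfilled `{MODEL_NAME}` placeholder:

```python
# Sketch: scoring a sentence pair; util.cos_sim matches the cosine similarity
# used by the training loss. {MODEL_NAME} is the card's unfilled placeholder.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("{MODEL_NAME}")
emb = model.encode(
    ["This is an example sentence", "Each sentence is converted"],
    convert_to_tensor=True,
)
print(util.cos_sim(emb[0], emb[1]).item())  # cosine similarity in [-1, 1]
```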
Canyonevo/DialoGPT-medium-KingHenry
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch def cls_pooling(model_output, attention_mask): return model_output[0][:,0] # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1369 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 136, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Capreolus/bert-base-msmarco
[ "pytorch", "tf", "jax", "bert", "text-classification", "arxiv:2008.09093", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
238
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: marksiaasrheyeu --- ### marksiaasrheyeu-v-10-0 Dreambooth model trained by mlo-on in the [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) on the v2-1-512 base model. You can run your new concept via `diffusers` with the [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: marksiaasrheyeu (use that in your prompt) ![marksiaasrheyeu 0](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%281%29.jpg)![marksiaasrheyeu 1](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%282%29.jpg)![marksiaasrheyeu 2](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%283%29.jpg)![marksiaasrheyeu 3](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%284%29.jpg)![marksiaasrheyeu 4](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%285%29.jpg)![marksiaasrheyeu 5](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%286%29.jpg)![marksiaasrheyeu 6](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%287%29.jpg)![marksiaasrheyeu 7](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%288%29.jpg)![marksiaasrheyeu 8](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%289%29.jpg)![marksiaasrheyeu 9](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%2810%29.jpg)![marksiaasrheyeu 10](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%2811%29.jpg)![marksiaasrheyeu 11](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%2812%29.jpg)![marksiaasrheyeu 12](https://huggingface.co/mlo-on/marksiaasrheyeu-v-10-0/resolve/main/concept_images/marksiaasrheyeu_%2813%29.jpg)
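Beyond the linked Colab, a minimal local `diffusers` sketch; the repo id is taken from the card's image URLs, and the sampler settings are assumptions.

```python
# Hedged sketch: generating with the Dreambooth concept locally via diffusers.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "mlo-on/marksiaasrheyeu-v-10-0", torch_dtype=torch.float16
).to("cuda")
image = pipe("a portrait photo of marksiaasrheyeu", num_inference_steps=30).images[0]
image.save("marksiaasrheyeu.png")
```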
Carlork314/Xd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - txoriak_txori metrics: - accuracy model-index: - name: vit-base-txoriaktxori results: - task: name: Image Classification type: image-classification dataset: name: txoriak_txori type: txoriak_txori config: default split: validation args: default metrics: - name: Accuracy type: accuracy value: 0.9864 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-txoriaktxori This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the txoriak_txori dataset. It achieves the following results on the evaluation set: - Loss: 0.0559 - Accuracy: 0.9864 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 5.8505 | 0.02 | 100 | 5.8381 | 0.2584 | | 5.259 | 0.04 | 200 | 5.2556 | 0.4992 | | 4.6643 | 0.06 | 300 | 4.5950 | 0.6532 | | 4.0801 | 0.08 | 400 | 3.9534 | 0.6976 | | 3.3312 | 0.1 | 500 | 3.2908 | 0.7608 | | 2.773 | 0.12 | 600 | 2.6892 | 0.7704 | | 2.3108 | 0.14 | 700 | 2.0982 | 0.7976 | | 1.662 | 0.16 | 800 | 1.6214 | 0.8216 | | 1.3897 | 0.18 | 900 | 1.2662 | 0.8604 | | 1.1634 | 0.2 | 1000 | 0.9868 | 0.8892 | | 1.0498 | 0.22 | 1100 | 0.7855 | 0.8992 | | 0.5978 | 0.24 | 1200 | 0.6305 | 0.912 | | 0.6399 | 0.26 | 1300 | 0.5560 | 0.9164 | | 0.607 | 0.28 | 1400 | 0.5119 | 0.9192 | | 0.6595 | 0.3 | 1500 | 0.4307 | 0.9272 | | 0.5239 | 0.32 | 1600 | 0.4124 | 0.9176 | | 0.5166 | 0.34 | 1700 | 0.3280 | 0.9312 | | 0.5352 | 0.36 | 1800 | 0.3155 | 0.9308 | | 0.4036 | 0.38 | 1900 | 0.2893 | 0.9424 | | 0.3836 | 0.4 | 2000 | 0.3161 | 0.9272 | | 0.3418 | 0.42 | 2100 | 0.3005 | 0.9384 | | 0.4172 | 0.44 | 2200 | 0.2518 | 0.9456 | | 0.4293 | 0.46 | 2300 | 0.2367 | 0.9424 | | 0.3551 | 0.48 | 2400 | 0.2422 | 0.9432 | | 0.2718 | 0.5 | 2500 | 0.2207 | 0.9492 | | 0.3802 | 0.52 | 2600 | 0.2163 | 0.9428 | | 0.2916 | 0.54 | 2700 | 0.2156 | 0.946 | | 0.3384 | 0.56 | 2800 | 0.2037 | 0.9508 | | 0.352 | 0.58 | 2900 | 0.2241 | 0.9432 | | 0.3868 | 0.6 | 3000 | 0.2525 | 0.9428 | | 0.3195 | 0.62 | 3100 | 0.2032 | 0.9496 | | 0.2618 | 0.64 | 3200 | 0.2088 | 0.944 | | 0.326 | 0.66 | 3300 | 0.1744 | 0.9536 | | 0.2691 | 0.68 | 3400 | 0.1853 | 0.9516 | | 0.2629 | 0.7 | 3500 | 0.1788 | 0.9464 | | 0.2965 | 0.72 | 3600 | 0.1719 | 0.9572 | | 0.3565 | 0.74 | 3700 | 0.2041 | 0.9452 | | 0.2344 | 0.76 | 3800 | 0.1863 | 0.9504 | | 0.4416 | 0.78 | 3900 | 0.1938 | 0.9472 | | 0.2901 | 0.8 | 4000 | 0.1674 | 0.9572 | | 0.3158 | 0.82 | 4100 | 0.2006 | 0.9496 | | 0.3708 | 0.84 | 4200 | 0.1850 | 0.952 | | 0.2636 | 0.86 | 4300 | 0.1488 | 0.9624 | | 0.1764 | 0.88 | 4400 | 0.1818 | 0.9524 | | 0.4299 | 0.9 | 4500 | 0.1642 | 0.9576 | | 0.4862 | 0.92 | 4600 | 0.1867 | 0.9516 | | 0.288 | 0.94 | 4700 | 0.1362 | 0.9604 | | 0.2715 | 0.96 | 4800 | 0.1384 | 0.9668 | | 0.3139 | 0.98 | 4900 | 0.1607 | 0.956 | | 0.2301 | 
1.0 | 5000 | 0.1428 | 0.9628 | | 0.1527 | 1.02 | 5100 | 0.1313 | 0.9672 | | 0.1856 | 1.04 | 5200 | 0.1356 | 0.9628 | | 0.1143 | 1.06 | 5300 | 0.1469 | 0.962 | | 0.1465 | 1.08 | 5400 | 0.1320 | 0.9648 | | 0.1342 | 1.1 | 5500 | 0.1291 | 0.9644 | | 0.1686 | 1.12 | 5600 | 0.1589 | 0.952 | | 0.0683 | 1.14 | 5700 | 0.1598 | 0.9592 | | 0.095 | 1.16 | 5800 | 0.1330 | 0.9628 | | 0.1458 | 1.18 | 5900 | 0.1307 | 0.9652 | | 0.2321 | 1.2 | 6000 | 0.1498 | 0.9608 | | 0.0593 | 1.22 | 6100 | 0.1393 | 0.9636 | | 0.1721 | 1.24 | 6200 | 0.1564 | 0.9604 | | 0.2735 | 1.26 | 6300 | 0.1509 | 0.9572 | | 0.1384 | 1.28 | 6400 | 0.1526 | 0.958 | | 0.1232 | 1.3 | 6500 | 0.1560 | 0.9596 | | 0.1615 | 1.32 | 6600 | 0.1348 | 0.9652 | | 0.2521 | 1.34 | 6700 | 0.1223 | 0.9684 | | 0.0616 | 1.36 | 6800 | 0.1556 | 0.9616 | | 0.23 | 1.38 | 6900 | 0.1338 | 0.9652 | | 0.237 | 1.4 | 7000 | 0.1140 | 0.9664 | | 0.2572 | 1.42 | 7100 | 0.1191 | 0.9672 | | 0.1841 | 1.44 | 7200 | 0.1121 | 0.9708 | | 0.1212 | 1.46 | 7300 | 0.1089 | 0.9708 | | 0.1436 | 1.48 | 7400 | 0.1246 | 0.9672 | | 0.1403 | 1.5 | 7500 | 0.1234 | 0.9676 | | 0.1794 | 1.52 | 7600 | 0.1273 | 0.966 | | 0.2153 | 1.54 | 7700 | 0.1423 | 0.964 | | 0.1347 | 1.56 | 7800 | 0.0985 | 0.9708 | | 0.1989 | 1.58 | 7900 | 0.1117 | 0.9712 | | 0.2686 | 1.6 | 8000 | 0.1166 | 0.9704 | | 0.134 | 1.62 | 8100 | 0.1391 | 0.962 | | 0.2474 | 1.64 | 8200 | 0.1280 | 0.9676 | | 0.0635 | 1.66 | 8300 | 0.1079 | 0.9696 | | 0.1073 | 1.68 | 8400 | 0.1335 | 0.9628 | | 0.1483 | 1.7 | 8500 | 0.1108 | 0.9692 | | 0.0933 | 1.72 | 8600 | 0.1059 | 0.9708 | | 0.1204 | 1.74 | 8700 | 0.1007 | 0.9752 | | 0.1051 | 1.76 | 8800 | 0.1055 | 0.9712 | | 0.1509 | 1.78 | 8900 | 0.0995 | 0.9704 | | 0.1404 | 1.8 | 9000 | 0.1012 | 0.9744 | | 0.0502 | 1.82 | 9100 | 0.0913 | 0.9768 | | 0.3038 | 1.84 | 9200 | 0.0988 | 0.9732 | | 0.1651 | 1.86 | 9300 | 0.1146 | 0.9656 | | 0.1047 | 1.88 | 9400 | 0.1140 | 0.9664 | | 0.1639 | 1.9 | 9500 | 0.1059 | 0.97 | | 0.1044 | 1.92 | 9600 | 0.1012 | 0.9744 | | 0.1955 | 1.94 | 9700 | 0.1119 | 0.9676 | | 0.1903 | 1.96 | 9800 | 0.1127 | 0.9716 | | 0.1328 | 1.98 | 9900 | 0.1199 | 0.9628 | | 0.1219 | 2.0 | 10000 | 0.1011 | 0.972 | | 0.0514 | 2.02 | 10100 | 0.1040 | 0.9728 | | 0.0194 | 2.04 | 10200 | 0.0994 | 0.9752 | | 0.0469 | 2.06 | 10300 | 0.1027 | 0.9716 | | 0.0417 | 2.08 | 10400 | 0.1045 | 0.9748 | | 0.0566 | 2.1 | 10500 | 0.0861 | 0.9792 | | 0.0427 | 2.12 | 10600 | 0.1094 | 0.974 | | 0.1358 | 2.14 | 10700 | 0.0795 | 0.9776 | | 0.0119 | 2.16 | 10800 | 0.0972 | 0.9748 | | 0.0379 | 2.18 | 10900 | 0.1087 | 0.97 | | 0.0951 | 2.2 | 11000 | 0.1079 | 0.9728 | | 0.0256 | 2.22 | 11100 | 0.0951 | 0.9748 | | 0.076 | 2.24 | 11200 | 0.0945 | 0.9764 | | 0.1004 | 2.26 | 11300 | 0.0870 | 0.9788 | | 0.0657 | 2.28 | 11400 | 0.1073 | 0.974 | | 0.0332 | 2.3 | 11500 | 0.0960 | 0.9752 | | 0.0087 | 2.32 | 11600 | 0.0865 | 0.978 | | 0.0351 | 2.34 | 11700 | 0.0963 | 0.9736 | | 0.0127 | 2.36 | 11800 | 0.0989 | 0.976 | | 0.0447 | 2.38 | 11900 | 0.1038 | 0.9752 | | 0.023 | 2.4 | 12000 | 0.0919 | 0.9744 | | 0.0329 | 2.42 | 12100 | 0.0857 | 0.9796 | | 0.042 | 2.44 | 12200 | 0.0812 | 0.9804 | | 0.0549 | 2.46 | 12300 | 0.1114 | 0.9732 | | 0.0806 | 2.48 | 12400 | 0.0971 | 0.9772 | | 0.1768 | 2.5 | 12500 | 0.0933 | 0.974 | | 0.059 | 2.52 | 12600 | 0.0943 | 0.9788 | | 0.0184 | 2.54 | 12700 | 0.0874 | 0.978 | | 0.021 | 2.56 | 12800 | 0.0903 | 0.9764 | | 0.0457 | 2.58 | 12900 | 0.0999 | 0.976 | | 0.0788 | 2.6 | 13000 | 0.0954 | 0.9732 | | 0.0599 | 2.62 | 13100 | 0.0876 | 0.9752 | | 0.1041 | 2.64 | 13200 | 0.1017 | 0.9744 | | 0.0309 
| 2.66 | 13300 | 0.0918 | 0.9772 | | 0.1347 | 2.68 | 13400 | 0.0758 | 0.9792 | | 0.0432 | 2.7 | 13500 | 0.0790 | 0.9808 | | 0.0802 | 2.72 | 13600 | 0.0860 | 0.9776 | | 0.0841 | 2.74 | 13700 | 0.0857 | 0.98 | | 0.0513 | 2.76 | 13800 | 0.0895 | 0.9764 | | 0.0129 | 2.78 | 13900 | 0.0861 | 0.9772 | | 0.1279 | 2.8 | 14000 | 0.0895 | 0.9764 | | 0.0074 | 2.82 | 14100 | 0.0842 | 0.978 | | 0.0132 | 2.84 | 14200 | 0.0742 | 0.9796 | | 0.0974 | 2.86 | 14300 | 0.0854 | 0.9776 | | 0.0803 | 2.88 | 14400 | 0.0769 | 0.9804 | | 0.037 | 2.9 | 14500 | 0.0806 | 0.9788 | | 0.0936 | 2.92 | 14600 | 0.0824 | 0.9812 | | 0.0064 | 2.94 | 14700 | 0.0748 | 0.9832 | | 0.0631 | 2.96 | 14800 | 0.0761 | 0.9828 | | 0.0158 | 2.98 | 14900 | 0.0709 | 0.9848 | | 0.0433 | 3.0 | 15000 | 0.0704 | 0.9828 | | 0.0028 | 3.02 | 15100 | 0.0712 | 0.9824 | | 0.0031 | 3.04 | 15200 | 0.0717 | 0.9808 | | 0.0191 | 3.06 | 15300 | 0.0716 | 0.9828 | | 0.0051 | 3.08 | 15400 | 0.0708 | 0.9832 | | 0.0205 | 3.1 | 15500 | 0.0686 | 0.9828 | | 0.1147 | 3.12 | 15600 | 0.0670 | 0.984 | | 0.0014 | 3.14 | 15700 | 0.0628 | 0.9848 | | 0.0082 | 3.16 | 15800 | 0.0659 | 0.984 | | 0.0149 | 3.18 | 15900 | 0.0672 | 0.9836 | | 0.0056 | 3.2 | 16000 | 0.0676 | 0.9852 | | 0.0059 | 3.22 | 16100 | 0.0706 | 0.9836 | | 0.0198 | 3.24 | 16200 | 0.0725 | 0.9812 | | 0.0019 | 3.26 | 16300 | 0.0681 | 0.9828 | | 0.0013 | 3.28 | 16400 | 0.0681 | 0.9856 | | 0.0663 | 3.3 | 16500 | 0.0704 | 0.9852 | | 0.0024 | 3.32 | 16600 | 0.0697 | 0.984 | | 0.0081 | 3.34 | 16700 | 0.0679 | 0.9852 | | 0.0264 | 3.36 | 16800 | 0.0631 | 0.9872 | | 0.0061 | 3.38 | 16900 | 0.0651 | 0.9848 | | 0.0169 | 3.4 | 17000 | 0.0655 | 0.9828 | | 0.0013 | 3.42 | 17100 | 0.0661 | 0.9836 | | 0.0072 | 3.44 | 17200 | 0.0633 | 0.9848 | | 0.009 | 3.46 | 17300 | 0.0634 | 0.9848 | | 0.0028 | 3.48 | 17400 | 0.0634 | 0.9844 | | 0.0024 | 3.5 | 17500 | 0.0637 | 0.9836 | | 0.0031 | 3.52 | 17600 | 0.0641 | 0.9848 | | 0.004 | 3.54 | 17700 | 0.0619 | 0.9856 | | 0.0562 | 3.56 | 17800 | 0.0673 | 0.9856 | | 0.0005 | 3.58 | 17900 | 0.0644 | 0.9864 | | 0.0079 | 3.6 | 18000 | 0.0647 | 0.9872 | | 0.0016 | 3.62 | 18100 | 0.0617 | 0.9872 | | 0.0019 | 3.64 | 18200 | 0.0636 | 0.9872 | | 0.0047 | 3.66 | 18300 | 0.0608 | 0.9848 | | 0.0327 | 3.68 | 18400 | 0.0586 | 0.9868 | | 0.0108 | 3.7 | 18500 | 0.0594 | 0.9872 | | 0.0061 | 3.72 | 18600 | 0.0597 | 0.9868 | | 0.0106 | 3.74 | 18700 | 0.0579 | 0.9872 | | 0.001 | 3.76 | 18800 | 0.0564 | 0.9872 | | 0.012 | 3.78 | 18900 | 0.0561 | 0.9876 | | 0.0038 | 3.8 | 19000 | 0.0566 | 0.9868 | | 0.0099 | 3.82 | 19100 | 0.0573 | 0.9864 | | 0.0026 | 3.84 | 19200 | 0.0575 | 0.9864 | | 0.0062 | 3.86 | 19300 | 0.0573 | 0.9872 | | 0.0239 | 3.88 | 19400 | 0.0573 | 0.9864 | | 0.0026 | 3.9 | 19500 | 0.0568 | 0.9868 | | 0.0014 | 3.92 | 19600 | 0.0557 | 0.9868 | | 0.0019 | 3.94 | 19700 | 0.0562 | 0.9864 | | 0.0484 | 3.96 | 19800 | 0.0560 | 0.9864 | | 0.0022 | 3.98 | 19900 | 0.0559 | 0.9864 | | 0.0145 | 4.0 | 20000 | 0.0559 | 0.9864 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
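The card above omits a usage example; a minimal classification sketch follows (the repo id mirrors the card's model name and is an assumption, since the published id is not stated).

```python
# Hedged sketch: top-3 predictions from the fine-tuned ViT classifier.
# "vit-base-txoriaktxori" stands in for the actual (unstated) repo id.
from transformers import pipeline

clf = pipeline("image-classification", model="vit-base-txoriaktxori")
for pred in clf("example.jpg")[:3]:
    print(pred["label"], round(pred["score"], 3))
```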
dccuchile/albert-tiny-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: hBERTv1_sst2 results: - task: name: Text Classification type: text-classification dataset: name: GLUE SST2 type: glue config: sst2 split: validation args: sst2 metrics: - name: Accuracy type: accuracy value: 0.7901376146788991 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_sst2 This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.4525 - Accuracy: 0.7901 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6905 | 1.0 | 264 | 0.6919 | 0.5252 | | 0.6609 | 2.0 | 528 | 0.6088 | 0.6915 | | 0.4152 | 3.0 | 792 | 0.4525 | 0.7901 | | 0.2611 | 4.0 | 1056 | 0.4627 | 0.8096 | | 0.1953 | 5.0 | 1320 | 0.4894 | 0.8073 | | 0.1588 | 6.0 | 1584 | 0.6002 | 0.8016 | | 0.1336 | 7.0 | 1848 | 0.6467 | 0.8062 | | 0.1117 | 8.0 | 2112 | 0.6409 | 0.8062 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
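A minimal sentiment-inference sketch for completeness; the repo id below extends the base model's namespace and is a guess, since the card does not state where the fine-tune lives.

```python
# Hedged sketch: SST-2 style inference. "gokuls/hBERTv1_sst2" is an assumed
# repo id (the base model lives under the same namespace).
from transformers import pipeline

clf = pipeline("text-classification", model="gokuls/hBERTv1_sst2")
print(clf("a gripping, beautifully shot film"))
```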
dccuchile/albert-tiny-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - generated_from_trainer model-index: - name: diff_based_error_tagger results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # diff_based_error_tagger This model is a fine-tuned version of [csebuetnlp/banglabert](https://huggingface.co/csebuetnlp/banglabert) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0009 - 5 Err Precision: 0.9667 - 5 Err Recall: 1.0 - 5 Err F1: 0.9831 - 5 Err Number: 29 - Precision: 0.9922 - Recall: 0.9916 - F1: 0.9919 - Number: 9932 - Err Precision: 0.9695 - Err Recall: 1.0 - Err F1: 0.9845 - Err Number: 286 - Egin Err Precision: 0.9938 - Egin Err Recall: 0.9964 - Egin Err F1: 0.9951 - Egin Err Number: 1126 - El Err Precision: 0.9957 - El Err Recall: 0.9942 - El Err F1: 0.9949 - El Err Number: 1384 - Nd Err Precision: 0.9932 - Nd Err Recall: 0.9941 - Nd Err F1: 0.9937 - Nd Err Number: 1183 - Ne Word Err Precision: 0.9978 - Ne Word Err Recall: 0.9942 - Ne Word Err F1: 0.9960 - Ne Word Err Number: 8248 - Unc Insert Err Precision: 0.9956 - Unc Insert Err Recall: 0.9978 - Unc Insert Err F1: 0.9967 - Unc Insert Err Number: 903 - Micro Avg Precision: 0.9944 - Micro Avg Recall: 0.9934 - Micro Avg F1: 0.9939 - Micro Avg Number: 23091 - Macro Avg Precision: 0.9881 - Macro Avg Recall: 0.9960 - Macro Avg F1: 0.9920 - Macro Avg Number: 23091 - Weighted Avg Precision: 0.9944 - Weighted Avg Recall: 0.9934 - Weighted Avg F1: 0.9939 - Weighted Avg Number: 23091 - Overall Accuracy: 0.9994 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 40.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | 5 Err Precision | 5 Err Recall | 5 Err F1 | 5 Err Number | Precision | Recall | F1 | Number | Err Precision | Err Recall | Err F1 | Err Number | Egin Err Precision | Egin Err Recall | Egin Err F1 | Egin Err Number | El Err Precision | El Err Recall | El Err F1 | El Err Number | Nd Err Precision | Nd Err Recall | Nd Err F1 | Nd Err Number | Ne Word Err Precision | Ne Word Err Recall | Ne Word Err F1 | Ne Word Err Number | Unc Insert Err Precision | Unc Insert Err Recall | Unc Insert Err F1 | Unc Insert Err Number | Micro Avg Precision | Micro Avg Recall | Micro Avg F1 | Micro Avg Number | Macro Avg Precision | Macro Avg Recall | Macro Avg F1 | Macro Avg Number | Weighted Avg Precision | Weighted Avg Recall | Weighted Avg F1 | Weighted Avg Number | Overall Accuracy | 
|:-------------:|:-----:|:-----:|:---------------:|:---------------:|:------------:|:--------:|:------------:|:-----------:|:--------:|:------:|:--------:|:--------------:|:-----------:|:-------:|:-----------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------:|:-------------:|:---------:|:-------------:|:----------------:|:-------------:|:---------:|:-------------:|:---------------------:|:------------------:|:--------------:|:------------------:|:------------------------:|:---------------------:|:-----------------:|:---------------------:|:-------------------:|:----------------:|:------------:|:----------------:|:-------------------:|:----------------:|:------------:|:----------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:| | 0.8517 | 1.0 | 575 | 0.2995 | 0.0 | 0.0 | 0.0 | 29 | 0.2579 | 0.0859 | 0.1289 | 9932 | 0.0 | 0.0 | 0.0 | 286 | 0.0 | 0.0 | 0.0 | 1126 | 0.0 | 0.0 | 0.0 | 1384 | 0.0 | 0.0 | 0.0 | 1183 | 0.6479 | 0.2836 | 0.3945 | 8248 | 0.0 | 0.0 | 0.0 | 903 | 0.4615 | 0.1382 | 0.2127 | 23091 | 0.1132 | 0.0462 | 0.0654 | 23091 | 0.3424 | 0.1382 | 0.1963 | 23091 | 0.9291 | | 0.2635 | 2.0 | 1150 | 0.2054 | 0.0 | 0.0 | 0.0 | 29 | 0.3578 | 0.2270 | 0.2778 | 9932 | 0.0 | 0.0 | 0.0 | 286 | 0.8148 | 0.0195 | 0.0382 | 1126 | 0.8571 | 0.0650 | 0.1209 | 1384 | 0.7344 | 0.2384 | 0.3599 | 1183 | 0.6604 | 0.5507 | 0.6006 | 8248 | 0.0 | 0.0 | 0.0 | 903 | 0.5250 | 0.3114 | 0.3910 | 23091 | 0.4281 | 0.1376 | 0.1747 | 23091 | 0.5185 | 0.3114 | 0.3616 | 23091 | 0.9422 | | 0.2027 | 3.0 | 1725 | 0.1546 | 0.0 | 0.0 | 0.0 | 29 | 0.4421 | 0.3362 | 0.3819 | 9932 | 0.0 | 0.0 | 0.0 | 286 | 0.6649 | 0.5639 | 0.6103 | 1126 | 0.8 | 0.2168 | 0.3411 | 1384 | 0.6406 | 0.5740 | 0.6054 | 1183 | 0.7271 | 0.6519 | 0.6875 | 8248 | 0.6094 | 0.0864 | 0.1513 | 903 | 0.5959 | 0.4507 | 0.5133 | 23091 | 0.4855 | 0.3036 | 0.3472 | 23091 | 0.5869 | 0.4507 | 0.4970 | 23091 | 0.9526 | | 0.1655 | 4.0 | 2300 | 0.1197 | 0.0 | 0.0 | 0.0 | 29 | 0.5546 | 0.4326 | 0.4861 | 9932 | 1.0 | 0.0315 | 0.0610 | 286 | 0.8037 | 0.6874 | 0.7410 | 1126 | 0.8553 | 0.3374 | 0.4839 | 1384 | 0.8139 | 0.6653 | 0.7321 | 1183 | 0.8075 | 0.7180 | 0.7601 | 8248 | 0.6805 | 0.1816 | 0.2867 | 903 | 0.6974 | 0.5379 | 0.6073 | 23091 | 0.6894 | 0.3817 | 0.4439 | 23091 | 0.6981 | 0.5379 | 0.5952 | 23091 | 0.9636 | | 0.1321 | 5.0 | 2875 | 0.0841 | 0.0 | 0.0 | 0.0 | 29 | 0.6868 | 0.6291 | 0.6567 | 9932 | 0.8431 | 0.1503 | 0.2552 | 286 | 0.8635 | 0.7869 | 0.8234 | 1126 | 0.7739 | 0.7197 | 0.7458 | 1384 | 0.8690 | 0.7625 | 0.8122 | 1183 | 0.8646 | 0.8339 | 0.8490 | 8248 | 0.6517 | 0.3378 | 0.4449 | 903 | 0.7771 | 0.7041 | 0.7388 | 23091 | 0.6941 | 0.5275 | 0.5734 | 23091 | 0.7732 | 0.7041 | 0.7327 | 23091 | 0.9756 | | 0.0998 | 6.0 | 3450 | 0.0578 | 0.0 | 0.0 | 0.0 | 29 | 0.8054 | 0.7739 | 0.7893 | 9932 | 0.8182 | 0.2832 | 0.4208 | 286 | 0.8971 | 0.8597 | 0.8780 | 1126 | 0.8313 | 0.8259 | 0.8286 | 1384 | 0.8867 | 0.8335 | 0.8593 | 1183 | 0.9104 | 0.9089 | 0.9097 | 8248 | 0.7280 | 0.6224 | 0.6710 | 903 | 0.8518 | 0.8195 | 0.8353 | 23091 | 0.7346 | 0.6384 | 0.6696 | 23091 | 0.8492 | 0.8195 | 0.8324 | 23091 | 0.9845 | | 0.0768 | 7.0 | 4025 | 0.0410 | 0.0 | 0.0 | 0.0 | 29 | 0.8790 | 0.8519 | 0.8652 | 9932 | 0.8889 | 0.3916 | 0.5437 | 286 | 0.9032 | 0.9032 | 0.9032 | 1126 | 0.9332 | 0.8374 | 0.8827 | 1384 | 0.8774 | 0.8588 | 0.8680 | 1183 | 0.9427 | 0.9377 | 0.9402 | 8248 | 0.7850 | 0.7885 | 0.7867 | 903 | 0.9027 | 0.8753 | 0.8888 | 23091 | 0.7762 | 0.6961 | 0.7237 | 
23091 | 0.9014 | 0.8753 | 0.8869 | 23091 | 0.9897 |
| 0.0601 | 8.0 | 4600 | 0.0294 | 0.0 | 0.0 | 0.0 | 29 | 0.9161 | 0.8936 | 0.9047 | 9932 | 0.8775 | 0.6259 | 0.7306 | 286 | 0.9336 | 0.9245 | 0.9290 | 1126 | 0.9555 | 0.8526 | 0.9011 | 1384 | 0.9115 | 0.8791 | 0.8950 | 1183 | 0.9606 | 0.9630 | 0.9618 | 8248 | 0.8757 | 0.8505 | 0.8629 | 903 | 0.9333 | 0.9106 | 0.9218 | 23091 | 0.8038 | 0.7487 | 0.7731 | 23091 | 0.9317 | 0.9106 | 0.9206 | 23091 | 0.9928 |
| 0.0465 | 9.0 | 5175 | 0.0233 | 0.0 | 0.0 | 0.0 | 29 | 0.9420 | 0.9258 | 0.9338 | 9932 | 0.8583 | 0.7413 | 0.7955 | 286 | 0.9158 | 0.9369 | 0.9263 | 1126 | 0.9421 | 0.9162 | 0.9289 | 1384 | 0.8985 | 0.8977 | 0.8981 | 1183 | 0.9781 | 0.9622 | 0.9701 | 8248 | 0.8934 | 0.9280 | 0.9104 | 903 | 0.9484 | 0.9340 | 0.9411 | 23091 | 0.8035 | 0.7885 | 0.7954 | 23091 | 0.9473 | 0.9340 | 0.9405 | 23091 | 0.9944 |
| 0.037 | 10.0 | 5750 | 0.0167 | 0.0 | 0.0 | 0.0 | 29 | 0.9539 | 0.9528 | 0.9534 | 9932 | 0.8418 | 0.8741 | 0.8576 | 286 | 0.9517 | 0.9449 | 0.9483 | 1126 | 0.9699 | 0.9321 | 0.9506 | 1384 | 0.9330 | 0.9298 | 0.9314 | 1183 | 0.9726 | 0.9787 | 0.9756 | 8248 | 0.9411 | 0.9557 | 0.9484 | 903 | 0.9585 | 0.9572 | 0.9578 | 23091 | 0.8205 | 0.8210 | 0.8207 | 23091 | 0.9573 | 0.9572 | 0.9572 | 23091 | 0.9960 |
| 0.0295 | 11.0 | 6325 | 0.0141 | 0.0 | 0.0 | 0.0 | 29 | 0.9551 | 0.9578 | 0.9565 | 9932 | 0.8571 | 0.9021 | 0.8790 | 286 | 0.9607 | 0.9547 | 0.9577 | 1126 | 0.9791 | 0.9473 | 0.9629 | 1384 | 0.9367 | 0.9374 | 0.9371 | 1183 | 0.9868 | 0.9807 | 0.9838 | 8248 | 0.8456 | 0.9767 | 0.9065 | 903 | 0.9609 | 0.9630 | 0.9619 | 23091 | 0.8151 | 0.8321 | 0.8229 | 23091 | 0.9605 | 0.9630 | 0.9616 | 23091 | 0.9964 |
| 0.0249 | 12.0 | 6900 | 0.0102 | 1.0 | 0.0690 | 0.1290 | 29 | 0.9775 | 0.9723 | 0.9749 | 9932 | 0.9231 | 0.8811 | 0.9016 | 286 | 0.9453 | 0.9671 | 0.9561 | 1126 | 0.9708 | 0.9624 | 0.9666 | 1384 | 0.9456 | 0.9544 | 0.9499 | 1183 | 0.9896 | 0.9850 | 0.9873 | 8248 | 0.9671 | 0.9779 | 0.9725 | 903 | 0.9771 | 0.9730 | 0.9751 | 23091 | 0.9649 | 0.8461 | 0.8547 | 23091 | 0.9772 | 0.9730 | 0.9746 | 23091 | 0.9975 |
| 0.0203 | 13.0 | 7475 | 0.0084 | 1.0 | 0.1379 | 0.2424 | 29 | 0.9787 | 0.9723 | 0.9755 | 9932 | 0.9357 | 0.9161 | 0.9258 | 286 | 0.9733 | 0.9716 | 0.9724 | 1126 | 0.9904 | 0.9646 | 0.9773 | 1384 | 0.9574 | 0.9687 | 0.9630 | 1183 | 0.9924 | 0.9879 | 0.9902 | 8248 | 0.9757 | 0.9767 | 0.9762 | 903 | 0.9823 | 0.9756 | 0.9789 | 23091 | 0.9755 | 0.8620 | 0.8779 | 23091 | 0.9823 | 0.9756 | 0.9785 | 23091 | 0.9980 |
| 0.0181 | 14.0 | 8050 | 0.0066 | 1.0 | 0.2069 | 0.3429 | 29 | 0.9827 | 0.9785 | 0.9806 | 9932 | 0.9627 | 0.9021 | 0.9314 | 286 | 0.9743 | 0.9760 | 0.9752 | 1126 | 0.9804 | 0.9776 | 0.9790 | 1384 | 0.9662 | 0.9653 | 0.9658 | 1183 | 0.9934 | 0.9905 | 0.9920 | 8248 | 0.9738 | 0.9889 | 0.9813 | 903 | 0.9846 | 0.9804 | 0.9825 | 23091 | 0.9792 | 0.8732 | 0.8935 | 23091 | 0.9846 | 0.9804 | 0.9822 | 23091 | 0.9983 |
| 0.0149 | 15.0 | 8625 | 0.0060 | 1.0 | 0.3448 | 0.5128 | 29 | 0.9842 | 0.9783 | 0.9812 | 9932 | 0.9416 | 0.9580 | 0.9497 | 286 | 0.9744 | 0.9822 | 0.9783 | 1126 | 0.9883 | 0.9776 | 0.9829 | 1384 | 0.9748 | 0.9806 | 0.9777 | 1183 | 0.9957 | 0.9871 | 0.9914 | 8248 | 0.9824 | 0.9900 | 0.9862 | 903 | 0.9870 | 0.9811 | 0.9840 | 23091 | 0.9802 | 0.8998 | 0.9200 | 23091 | 0.9870 | 0.9811 | 0.9839 | 23091 | 0.9985 |
| 0.0128 | 16.0 | 9200 | 0.0041 | 1.0 | 0.4828 | 0.6512 | 29 | 0.9874 | 0.9854 | 0.9864 | 9932 | 0.9618 | 0.9685 | 0.9652 | 286 | 0.9832 | 0.9885 | 0.9858 | 1126 | 0.9898 | 0.9848 | 0.9873 | 1384 | 0.9789 | 0.9822 | 0.9806 | 1183 | 0.9951 | 0.9928 | 0.9940 | 8248 | 0.9879 | 0.9945 | 0.9912 | 903 | 0.9894 | 0.9875 | 0.9884 | 23091 | 0.9855 | 0.9224 | 0.9427 | 23091 | 0.9894 | 0.9875 | 0.9883 | 23091 | 0.9989 |
| 0.0109 | 17.0 | 9775 | 0.0038 | 1.0 | 0.6552 | 0.7917 | 29 | 0.9880 | 0.9858 | 0.9869 | 9932 | 0.9516 | 0.9615 | 0.9565 | 286 | 0.9876 | 0.9876 | 0.9876 | 1126 | 0.9877 | 0.9892 | 0.9884 | 1384 | 0.9772 | 0.9789 | 0.9780 | 1183 | 0.9947 | 0.9932 | 0.9939 | 8248 | 0.9944 | 0.9911 | 0.9928 | 903 | 0.9896 | 0.9879 | 0.9887 | 23091 | 0.9851 | 0.9428 | 0.9595 | 23091 | 0.9896 | 0.9879 | 0.9887 | 23091 | 0.9989 |
| 0.0104 | 18.0 | 10350 | 0.0033 | 1.0 | 0.6207 | 0.7660 | 29 | 0.9885 | 0.9872 | 0.9879 | 9932 | 0.9561 | 0.9895 | 0.9725 | 286 | 0.9763 | 0.9893 | 0.9828 | 1126 | 0.9899 | 0.9921 | 0.9910 | 1384 | 0.9783 | 0.9890 | 0.9836 | 1183 | 0.9953 | 0.9941 | 0.9947 | 8248 | 0.9945 | 0.9934 | 0.9939 | 903 | 0.9897 | 0.9900 | 0.9898 | 23091 | 0.9849 | 0.9444 | 0.9590 | 23091 | 0.9897 | 0.9900 | 0.9898 | 23091 | 0.9990 |
| 0.009 | 19.0 | 10925 | 0.0026 | 0.9565 | 0.7586 | 0.8462 | 29 | 0.9903 | 0.9898 | 0.9901 | 9932 | 0.9690 | 0.9825 | 0.9757 | 286 | 0.9902 | 0.9876 | 0.9889 | 1126 | 0.9942 | 0.9899 | 0.9920 | 1384 | 0.9890 | 0.9899 | 0.9894 | 1183 | 0.9952 | 0.9954 | 0.9953 | 8248 | 0.9934 | 0.9956 | 0.9945 | 903 | 0.9920 | 0.9916 | 0.9918 | 23091 | 0.9847 | 0.9612 | 0.9715 | 23091 | 0.9920 | 0.9916 | 0.9918 | 23091 | 0.9992 |
| 0.0077 | 20.0 | 11500 | 0.0024 | 1.0 | 0.8966 | 0.9455 | 29 | 0.9913 | 0.9906 | 0.9910 | 9932 | 0.9530 | 0.9930 | 0.9726 | 286 | 0.9885 | 0.9938 | 0.9911 | 1126 | 0.9942 | 0.9949 | 0.9946 | 1384 | 0.9874 | 0.9915 | 0.9895 | 1183 | 0.9976 | 0.9931 | 0.9953 | 8248 | 0.9956 | 0.9956 | 0.9956 | 903 | 0.9931 | 0.9921 | 0.9926 | 23091 | 0.9885 | 0.9811 | 0.9844 | 23091 | 0.9931 | 0.9921 | 0.9926 | 23091 | 0.9993 |
| 0.0068 | 21.0 | 12075 | 0.0023 | 1.0 | 0.8966 | 0.9455 | 29 | 0.9911 | 0.9895 | 0.9903 | 9932 | 0.9823 | 0.9720 | 0.9772 | 286 | 0.9868 | 0.9947 | 0.9907 | 1126 | 0.9957 | 0.9928 | 0.9942 | 1384 | 0.9858 | 0.9941 | 0.9899 | 1183 | 0.9966 | 0.9939 | 0.9953 | 8248 | 0.9967 | 0.9934 | 0.9950 | 903 | 0.9930 | 0.9916 | 0.9923 | 23091 | 0.9919 | 0.9784 | 0.9848 | 23091 | 0.9930 | 0.9916 | 0.9923 | 23091 | 0.9993 |
| 0.0062 | 22.0 | 12650 | 0.0019 | 1.0 | 0.8966 | 0.9455 | 29 | 0.9913 | 0.9914 | 0.9914 | 9932 | 0.9758 | 0.9860 | 0.9809 | 286 | 0.9894 | 0.9911 | 0.9902 | 1126 | 0.9942 | 0.9935 | 0.9939 | 1384 | 0.9890 | 0.9907 | 0.9899 | 1183 | 0.9960 | 0.9956 | 0.9958 | 8248 | 0.9956 | 0.9956 | 0.9956 | 903 | 0.9929 | 0.9930 | 0.9930 | 23091 | 0.9914 | 0.9801 | 0.9854 | 23091 | 0.9929 | 0.9930 | 0.9930 | 23091 | 0.9993 |
| 0.0055 | 23.0 | 13225 | 0.0018 | 1.0 | 0.9310 | 0.9643 | 29 | 0.9923 | 0.9911 | 0.9917 | 9932 | 0.9758 | 0.9860 | 0.9809 | 286 | 0.9902 | 0.9911 | 0.9907 | 1126 | 0.9942 | 0.9949 | 0.9946 | 1384 | 0.9882 | 0.9932 | 0.9907 | 1183 | 0.9967 | 0.9943 | 0.9955 | 8248 | 0.9967 | 0.9945 | 0.9956 | 903 | 0.9937 | 0.9926 | 0.9931 | 23091 | 0.9918 | 0.9845 | 0.9880 | 23091 | 0.9937 | 0.9926 | 0.9931 | 23091 | 0.9994 |
| 0.0053 | 24.0 | 13800 | 0.0015 | 1.0 | 0.9310 | 0.9643 | 29 | 0.9922 | 0.9916 | 0.9919 | 9932 | 0.9860 | 0.9825 | 0.9842 | 286 | 0.9903 | 0.9929 | 0.9916 | 1126 | 0.9942 | 0.9957 | 0.9949 | 1384 | 0.9899 | 0.9924 | 0.9911 | 1183 | 0.9959 | 0.9958 | 0.9958 | 8248 | 0.9967 | 0.9945 | 0.9956 | 903 | 0.9935 | 0.9934 | 0.9935 | 23091 | 0.9931 | 0.9845 | 0.9887 | 23091 | 0.9935 | 0.9934 | 0.9935 | 23091 | 0.9994 |
| 0.0048 | 25.0 | 14375 | 0.0015 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9916 | 0.9915 | 0.9916 | 9932 | 0.9758 | 0.9860 | 0.9809 | 286 | 0.9912 | 0.9956 | 0.9934 | 1126 | 0.9928 | 0.9971 | 0.9950 | 1384 | 0.9891 | 0.9949 | 0.9920 | 1183 | 0.9967 | 0.9941 | 0.9954 | 8248 | 0.9967 | 0.9967 | 0.9967 | 903 | 0.9933 | 0.9933 | 0.9933 | 23091 | 0.9876 | 0.9945 | 0.9910 | 23091 | 0.9933 | 0.9933 | 0.9933 | 23091 | 0.9994 |
| 0.0039 | 26.0 | 14950 | 0.0013 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9920 | 0.9908 | 0.9914 | 9932 | 0.9792 | 0.9895 | 0.9843 | 286 | 0.9912 | 0.9956 | 0.9934 | 1126 | 0.9971 | 0.9928 | 0.9949 | 1384 | 0.9916 | 0.9932 | 0.9924 | 1183 | 0.9966 | 0.9952 | 0.9959 | 8248 | 0.9967 | 0.9967 | 0.9967 | 903 | 0.9939 | 0.9931 | 0.9935 | 23091 | 0.9889 | 0.9942 | 0.9915 | 23091 | 0.9939 | 0.9931 | 0.9935 | 23091 | 0.9994 |
| 0.0039 | 27.0 | 15525 | 0.0013 | 1.0 | 0.9310 | 0.9643 | 29 | 0.9912 | 0.9921 | 0.9917 | 9932 | 0.9726 | 0.9930 | 0.9827 | 286 | 0.9929 | 0.9973 | 0.9951 | 1126 | 0.9978 | 0.9921 | 0.9949 | 1384 | 0.9907 | 0.9949 | 0.9928 | 1183 | 0.9949 | 0.9964 | 0.9956 | 8248 | 0.9923 | 0.9989 | 0.9956 | 903 | 0.9928 | 0.9942 | 0.9935 | 23091 | 0.9916 | 0.9870 | 0.9891 | 23091 | 0.9928 | 0.9942 | 0.9935 | 23091 | 0.9994 |
| 0.0037 | 28.0 | 16100 | 0.0013 | 1.0 | 0.9655 | 0.9825 | 29 | 0.9925 | 0.9915 | 0.9920 | 9932 | 0.9826 | 0.9860 | 0.9843 | 286 | 0.9929 | 0.9956 | 0.9942 | 1126 | 0.9942 | 0.9957 | 0.9949 | 1384 | 0.9924 | 0.9949 | 0.9937 | 1183 | 0.9982 | 0.9936 | 0.9959 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9947 | 0.9930 | 0.9938 | 23091 | 0.9936 | 0.9901 | 0.9918 | 23091 | 0.9947 | 0.9930 | 0.9938 | 23091 | 0.9994 |
| 0.0034 | 29.0 | 16675 | 0.0012 | 1.0 | 0.9655 | 0.9825 | 29 | 0.9918 | 0.9919 | 0.9919 | 9932 | 0.9726 | 0.9930 | 0.9827 | 286 | 0.9964 | 0.9938 | 0.9951 | 1126 | 0.9957 | 0.9942 | 0.9949 | 1384 | 0.9949 | 0.9924 | 0.9937 | 1183 | 0.9965 | 0.9950 | 0.9958 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9940 | 0.9935 | 0.9938 | 23091 | 0.9929 | 0.9905 | 0.9916 | 23091 | 0.9940 | 0.9935 | 0.9938 | 23091 | 0.9994 |
| 0.0031 | 30.0 | 17250 | 0.0011 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9909 | 0.9920 | 0.9915 | 9932 | 0.9792 | 0.9895 | 0.9843 | 286 | 0.9956 | 0.9947 | 0.9951 | 1126 | 0.9942 | 0.9957 | 0.9949 | 1384 | 0.9949 | 0.9932 | 0.9941 | 1183 | 0.9956 | 0.9962 | 0.9959 | 8248 | 0.9989 | 0.9945 | 0.9967 | 903 | 0.9934 | 0.9940 | 0.9937 | 23091 | 0.9895 | 0.9945 | 0.9920 | 23091 | 0.9934 | 0.9940 | 0.9937 | 23091 | 0.9994 |
| 0.0026 | 31.0 | 17825 | 0.0011 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9910 | 0.9918 | 0.9914 | 9932 | 0.9727 | 0.9965 | 0.9845 | 286 | 0.9938 | 0.9964 | 0.9951 | 1126 | 0.9928 | 0.9971 | 0.9950 | 1384 | 0.9932 | 0.9924 | 0.9928 | 1183 | 0.9964 | 0.9953 | 0.9958 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9932 | 0.9939 | 0.9936 | 23091 | 0.9878 | 0.9959 | 0.9918 | 23091 | 0.9932 | 0.9939 | 0.9936 | 23091 | 0.9994 |
| 0.0025 | 32.0 | 18400 | 0.0011 | 1.0 | 0.9655 | 0.9825 | 29 | 0.9914 | 0.9920 | 0.9917 | 9932 | 0.9727 | 0.9965 | 0.9845 | 286 | 0.9973 | 0.9929 | 0.9951 | 1126 | 0.9900 | 1.0 | 0.9950 | 1384 | 0.9932 | 0.9924 | 0.9928 | 1183 | 0.9972 | 0.9948 | 0.9960 | 8248 | 0.9945 | 0.9989 | 0.9967 | 903 | 0.9937 | 0.9939 | 0.9938 | 23091 | 0.9920 | 0.9916 | 0.9918 | 23091 | 0.9937 | 0.9939 | 0.9938 | 23091 | 0.9994 |
| 0.0025 | 33.0 | 18975 | 0.0010 | 1.0 | 0.9655 | 0.9825 | 29 | 0.9918 | 0.9922 | 0.9920 | 9932 | 0.9694 | 0.9965 | 0.9828 | 286 | 0.9973 | 0.9929 | 0.9951 | 1126 | 0.9928 | 0.9971 | 0.9950 | 1384 | 0.9932 | 0.9932 | 0.9932 | 1183 | 0.9966 | 0.9952 | 0.9959 | 8248 | 0.9967 | 0.9967 | 0.9967 | 903 | 0.9939 | 0.9939 | 0.9939 | 23091 | 0.9922 | 0.9912 | 0.9916 | 23091 | 0.9939 | 0.9939 | 0.9939 | 23091 | 0.9994 |
| 0.0023 | 34.0 | 19550 | 0.0010 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9914 | 0.9920 | 0.9917 | 9932 | 0.9662 | 1.0 | 0.9828 | 286 | 0.9947 | 0.9956 | 0.9951 | 1126 | 0.9964 | 0.9935 | 0.9949 | 1384 | 0.9932 | 0.9924 | 0.9928 | 1183 | 0.9964 | 0.9954 | 0.9959 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9935 | 0.9939 | 0.9937 | 23091 | 0.9876 | 0.9958 | 0.9916 | 23091 | 0.9936 | 0.9939 | 0.9937 | 23091 | 0.9994 |
| 0.0023 | 35.0 | 20125 | 0.0010 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9924 | 0.9918 | 0.9921 | 9932 | 0.9662 | 1.0 | 0.9828 | 286 | 0.9929 | 0.9973 | 0.9951 | 1126 | 0.9964 | 0.9935 | 0.9949 | 1384 | 0.9916 | 0.9958 | 0.9937 | 1183 | 0.9970 | 0.9945 | 0.9958 | 8248 | 0.9967 | 0.9967 | 0.9967 | 903 | 0.9941 | 0.9937 | 0.9939 | 23091 | 0.9875 | 0.9962 | 0.9918 | 23091 | 0.9941 | 0.9937 | 0.9939 | 23091 | 0.9994 |
| 0.002 | 36.0 | 20700 | 0.0010 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9923 | 0.9915 | 0.9919 | 9932 | 0.9695 | 1.0 | 0.9845 | 286 | 0.9947 | 0.9956 | 0.9951 | 1126 | 0.9957 | 0.9942 | 0.9949 | 1384 | 0.9924 | 0.9949 | 0.9937 | 1183 | 0.9979 | 0.9941 | 0.9960 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9945 | 0.9933 | 0.9939 | 23091 | 0.9881 | 0.9960 | 0.9920 | 23091 | 0.9945 | 0.9933 | 0.9939 | 23091 | 0.9994 |
| 0.0018 | 37.0 | 21275 | 0.0009 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9915 | 0.9919 | 0.9917 | 9932 | 0.9792 | 0.9895 | 0.9843 | 286 | 0.9938 | 0.9964 | 0.9951 | 1126 | 0.9964 | 0.9935 | 0.9949 | 1384 | 0.9924 | 0.9949 | 0.9937 | 1183 | 0.9965 | 0.9955 | 0.9960 | 8248 | 0.9967 | 0.9967 | 0.9967 | 903 | 0.9938 | 0.9939 | 0.9938 | 23091 | 0.9891 | 0.9948 | 0.9919 | 23091 | 0.9938 | 0.9939 | 0.9938 | 23091 | 0.9994 |
| 0.002 | 38.0 | 21850 | 0.0009 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9920 | 0.9914 | 0.9917 | 9932 | 0.9727 | 0.9965 | 0.9845 | 286 | 0.9956 | 0.9947 | 0.9951 | 1126 | 0.9949 | 0.9949 | 0.9949 | 1384 | 0.9924 | 0.9949 | 0.9937 | 1183 | 0.9979 | 0.9938 | 0.9959 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9944 | 0.9932 | 0.9938 | 23091 | 0.9885 | 0.9955 | 0.9919 | 23091 | 0.9944 | 0.9932 | 0.9938 | 23091 | 0.9994 |
| 0.0018 | 39.0 | 22425 | 0.0009 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9924 | 0.9914 | 0.9919 | 9932 | 0.9695 | 1.0 | 0.9845 | 286 | 0.9947 | 0.9956 | 0.9951 | 1126 | 0.9957 | 0.9942 | 0.9949 | 1384 | 0.9941 | 0.9932 | 0.9937 | 1183 | 0.9978 | 0.9942 | 0.9960 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9945 | 0.9932 | 0.9939 | 23091 | 0.9883 | 0.9958 | 0.9920 | 23091 | 0.9946 | 0.9932 | 0.9939 | 23091 | 0.9994 |
| 0.0017 | 40.0 | 23000 | 0.0009 | 0.9667 | 1.0 | 0.9831 | 29 | 0.9922 | 0.9916 | 0.9919 | 9932 | 0.9695 | 1.0 | 0.9845 | 286 | 0.9938 | 0.9964 | 0.9951 | 1126 | 0.9957 | 0.9942 | 0.9949 | 1384 | 0.9932 | 0.9941 | 0.9937 | 1183 | 0.9978 | 0.9942 | 0.9960 | 8248 | 0.9956 | 0.9978 | 0.9967 | 903 | 0.9944 | 0.9934 | 0.9939 | 23091 | 0.9881 | 0.9960 | 0.9920 | 23091 | 0.9944 | 0.9934 | 0.9939 | 23091 | 0.9994 |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.1+cu117
- Datasets 2.9.0
- Tokenizers 0.13.2
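The card above stops at the metrics table; a minimal inference sketch for a token-classification checkpoint like this one follows (the model id is hypothetical, since the final repo is not named here):

```python
from transformers import pipeline

# hypothetical repo id -- substitute the checkpoint this card describes
ner = pipeline("token-classification", model="your-username/your-ner-model", aggregation_strategy="simple")
print(ner("An example sentence for the tagger."))
```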
dccuchile/albert-xlarge-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1369 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1369, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
dccuchile/albert-xlarge-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 265.16 +/- 21.39 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the repo id and filename below are hypothetical -- substitute the Hub repo this model was pushed to):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# hypothetical repo id and filename -- replace with the actual ones for this model
checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
dccuchile/albert-xlarge-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3-base results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="Jatayoo/Taxi-v3-base", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
dccuchile/albert-xxlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
Access to model drift-ai/autotrain-mona-lisa-detection-38345101350 is restricted and you are not in the authorized list. Visit https://huggingface.co/drift-ai/autotrain-mona-lisa-detection-38345101350 to ask for access.
dccuchile/distilbert-base-spanish-uncased-finetuned-ner
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2023-03-01T20:02:25Z
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: pixelcopter-unit4 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 18.20 +/- 13.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
dccuchile/distilbert-base-spanish-uncased-finetuned-qa-mlqa
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: gpt-neox-20b-imdb_adapter-lr5e-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt-neox-20b-imdb_adapter-lr5e-4 This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
Chakita/Kalbert
[ "pytorch", "tensorboard", "albert", "fill-mask", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-03-01T20:55:19Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 11.96 +/- 4.43 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r lukee/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Chakita/gpt2_mwp
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-03-01T20:57:15Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `__main__.PubmedLowMemoryLoader` of length 26041 with parameters: ``` {'batch_size': 128} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 2000, "evaluator": "__main__.PubmedTruePositiveIRetrievalEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 21, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Chalponkey/DialoGPT-small-Barry
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - generated_from_trainer datasets: - city_learn model-index: - name: decision_transformer_random4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # decision_transformer_random4 This model is a fine-tuned version of [](https://huggingface.co/) on the city_learn dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
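The card gives no usage example; a minimal loading sketch follows, assuming the checkpoint is a standard `transformers` Decision Transformer (the card does not confirm the architecture, and the repo id is hypothetical):

```python
from transformers import DecisionTransformerModel

# hypothetical repo id; substitute the repo this card belongs to
model = DecisionTransformerModel.from_pretrained("your-username/decision_transformer_random4")
model.eval()  # actions are then produced by the usual return-conditioned rollout loop
```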
Champion/test_upload_vox2_wavlm_epoch8
[ "sidekit", "audio" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-01T21:05:25Z
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA text2image fine-tuning - https://huggingface.co/jwb/pokemon-lora These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were fine-tuned on the lambdalabs/pokemon-blip-captions dataset. You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
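A minimal inference sketch for these weights (the base and LoRA repo ids come from the card above; the prompt is illustrative, and `load_attn_procs` is assumed as the diffusers entry point for LoRA weights saved in this format):

```python
import torch
from diffusers import StableDiffusionPipeline

# base model and LoRA repo taken from the card header
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs("jwb/pokemon-lora")  # attach the LoRA attention weights
image = pipe("a green pokemon with a leaf tail", num_inference_steps=30).images[0]
image.save("pokemon.png")
```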
Charlotte/text2dm_models
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.78 +/- 18.46 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the repo id and filename below are hypothetical -- substitute the Hub repo this model was pushed to):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# hypothetical repo id and filename -- replace with the actual ones for this model
checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
Charlotte77/model_test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-01T21:13:14Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget library_name: ml-agents --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Write your model_id: npit/ppo-SnowballTarget 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Check/vaw2tmp
[ "tensorboard" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: gpt-neo-1.3B-finetuned-v5-seinfeld results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt-neo-1.3B-finetuned-v5-seinfeld This model is a fine-tuned version of [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3717 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.5867 | 0.79 | 8 | 2.5308 |
| 2.5459 | 1.59 | 16 | 2.4639 |
| 2.4799 | 2.4 | 24 | 2.4183 |
| 2.4034 | 3.2 | 32 | 2.3830 |
| 2.2838 | 3.99 | 40 | 2.3568 |
| 2.2223 | 4.79 | 48 | 2.3410 |

### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
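A minimal generation sketch for the fine-tuned checkpoint (the repo id is hypothetical, since the card does not name the final repo):

```python
from transformers import pipeline

# hypothetical repo id -- substitute the checkpoint this card describes
generator = pipeline("text-generation", model="your-username/gpt-neo-1.3B-finetuned-v5-seinfeld")
print(generator("JERRY: What's the deal with", max_new_tokens=60, do_sample=True)[0]["generated_text"])
```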
ChoboAvenger/DialoGPT-small-DocBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### carioca-guy Dreambooth model trained by Ingrid0693 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
ChoboAvenger/DialoGPT-small-joshua
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en pipeline_tag: summarization --- google/pegasus-large trained on a sample of VT-SSum (https://github.com/Dod-o/VT-SSum; https://arxiv.org/pdf/1610.02424.pdf)</br> The sample consists of 600 train, 200 validation, and 200 test examples from the categories Computer Science, Data Science, and Mathematics</br> </br> * Train Loss: ~2.0 * Validation Loss: ~0.7
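A minimal usage sketch for the summarizer described above (the model id is hypothetical -- substitute the repo this card belongs to):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="your-username/pegasus-vtssum")  # hypothetical id
document = "A long lecture transcript or article to compress..."
print(summarizer(document, max_length=128, min_length=32)[0]["summary_text"])
```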
Chuah/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en pipeline_tag: summarization --- google/pegasus-large trained on a sample of VT-SSum (https://github.com/Dod-o/VT-SSum; https://arxiv.org/pdf/1610.02424.pdf)</br> The sample consists of 600 train, 200 validation, and 200 test examples from the categories Computer Science, Data Science, and Mathematics</br> </br> * Train Loss: ~3.6 * Validation Loss: ~2.1
Chun/w-zh2en-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-03-01T22:33:55Z
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image --- # Galactic Diffusion This is a fine-tuned Stable Diffusion model trained on images from the <b>Entergalactic</b> series on Netflix. No token is needed. ### Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). ```python #!pip install diffusers transformers scipy torch from diffusers import StableDiffusionPipeline import torch model_id = "AlexZheng/galactic-diffusion-v1.0" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "a beautiful young female with long dark hair, clothed in full dress" image = pipe(prompt).images[0] image.save("./samples/1.png") ``` ### Sample images "a beautiful young female with long dark hair, clothed in full dress" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/0_0.png) "a strong handsome young male clothed in metal armors" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/2_0.png) "a British shorthair cat sitting on the floor" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/6_0.png) "a golden retriever running in the park" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/8_0.png) "a blue shining Porsche sports car" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/10_0.png) "a modern concept house, two stories, no people" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/12_0.png) "a warm and sweet living room, a TV, no people" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/14_0.png) "a beautiful city night scene, no people" ![output Samples v3](https://huggingface.co/AlexZheng/galactic-diffusion-v1.0/resolve/main/samples/16_0.png)
Chuu/Chumar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - f1 - accuracy model-index: - name: ber2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ber2 This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0472 - F1: 0.0 - Roc Auc: 0.5 - Accuracy: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 | Roc Auc | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|:--------:|
| No log | 1.0 | 121 | 0.2033 | 0.0039 | 0.5009 | 0.0 |
| No log | 2.0 | 242 | 0.0886 | 0.0 | 0.5 | 0.0 |
| No log | 3.0 | 363 | 0.0604 | 0.0 | 0.5 | 0.0 |
| No log | 4.0 | 484 | 0.0501 | 0.0 | 0.5 | 0.0 |
| 0.1703 | 5.0 | 605 | 0.0472 | 0.0 | 0.5 | 0.0 |

### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
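Given the multi-label metrics above (F1 plus Roc Auc), inference for a model like this typically thresholds per-label sigmoid scores rather than taking a softmax; a hedged sketch follows (the repo id is hypothetical and the label set is not given in the card):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tok = AutoTokenizer.from_pretrained("your-username/ber2")  # hypothetical repo id
model = AutoModelForSequenceClassification.from_pretrained("your-username/ber2")
logits = model(**tok("example text", return_tensors="pt")).logits
preds = (torch.sigmoid(logits) > 0.5).int()  # multi-label: per-label sigmoid + threshold
```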
Cinnamon/electra-small-japanese-generator
[ "pytorch", "electra", "fill-mask", "ja", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- tags: - conversational --- # PowPowGaming DialoGPT Model
Cloudy/DialoGPT-CJ-large
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- datasets: - SirNeural/flan_v2 metrics: - perplexity tags: - flan - opt - peft --- ## FLAN-OPT-2.7b-LoRA OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. This model is [facebook/opt-2.7b](https://hf.co/facebook/opt-2.7b) finetuned with low-rank adapters (https://arxiv.org/abs/2106.09685) on the FLAN datasets (https://arxiv.org/pdf/2210.11416.pdf). Low-rank adapters (r=16) finetuned over 1.1m new tokens of a FLAN task mixture, with the start of each example cut off if it was too large to fit within a 256 token context. The model reaches a train ppl of 5.09 and an eval ppl of 4.36. ### Inference Example (Chain-of-Thought prompt): ```python # %pip install -qq transformers git+https://github.com/huggingface/peft accelerate bitsandbytes from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "crumb/FLAN-OPT-2.7b-LoRA" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, load_in_8bit=True, low_cpu_mem_usage=True, device_map='auto') model = PeftModel.from_pretrained(model, peft_model_id) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) import torch prompt = """ Q: Answer the following yes/no question by reasoning step-by-step. Could a dandelion suffer from hepatitis? A: Hepatitis only affects organisms with livers. Dandelions don’t have a liver. The answer is no. Q: Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet? A: A haiku is a japanese three-line poem. That is short enough to fit in 280 characters. The answer is yes. Q: Answer the following yes/no question by reasoning step-by-step. Can you reach space with a Cessna? A: """.strip() inputs = tokenizer([prompt], return_tensors='pt') with torch.autocast("cuda", dtype=torch.float16): outputs = model.generate( input_ids=inputs.input_ids.cuda(), attention_mask=inputs.attention_mask.cuda(), max_new_tokens=32, top_k=4, penalty_alpha=0.6 ) print("\n".join(tokenizer.decode(outputs[0]).split("\n")[:prompt.count("\n")+1])) # Cessna is a single-engine aircraft. Cessna cannot reach space. The answer is no. ```
CogComp/roberta-temporal-predictor
[ "pytorch", "roberta", "fill-mask", "arxiv:2202.00436", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="onigiriboy/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Connorvr/TeachingGen
[ "pytorch", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: creativeml-openrail-m --- Javalion-R is a penta merge of KoboldAI's GPT-J classics + PygmalionAI's Pygmalion6b; ((Janeway + Shinen) + (Skein + Pygmalion)) + GPT-R. Janeway + Shinen is listed under JANIN-GPTJ. Skein + Pygmalion is listed under SKEGMA-GPTJ. GPT-R itself is a 60/40 merge of two instruct research models (see digitous/GPT-R for full credits). This 5x+ merge is not intended for minors, as it can produce NC-17+ content. This model differs from Javelin-R by substituting the Adventure model with Pygmalion, as Adventure is rendered redundant in training data by Skein. Javalion-R is a research artefact with a dual purpose: entertainment, and an example of the potential value instruct models can bring when combined, through weight-sum merging, with models built for a different purpose. Mileage may vary; no refunds, best wishes. Mainly intended to be utilized with the open-source KoboldAI software. Optimal sampler and settings not determined. Feedback welcome! https://github.com/KoboldAI/KoboldAI-Client
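The weight-sum merging mentioned above can be illustrated with a minimal sketch; the file paths are hypothetical, the 60/40 ratio stands in for whatever recipe was actually used, and real merge tooling also has to align tokenizers and configs:

```python
import torch

# Minimal sketch of a weight-sum merge of two same-architecture checkpoints.
# Paths and the 0.6/0.4 ratio are illustrative; the exact Javalion-R recipe is not specified here.
a = torch.load("model_a/pytorch_model.bin", map_location="cpu")
b = torch.load("model_b/pytorch_model.bin", map_location="cpu")
merged = {k: (0.6 * a[k].float() + 0.4 * b[k].float()).to(a[k].dtype) for k in a}
torch.save(merged, "merged/pytorch_model.bin")
```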
Contrastive-Tension/BERT-Distil-CT-STSb
[ "pytorch", "tf", "distilbert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m base_model: WarriorMama777/AbyssOrangeMix2 instance_prompt: unlight tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - unlight These are LoRA adaptation weights for [WarriorMama777/AbyssOrangeMix2](https://huggingface.co/WarriorMama777/AbyssOrangeMix2). The weights were trained on the instance prompt "unlight" using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. Test prompt: 1boy, unlight ![image_0](test_images/image_0.png) ![image_1](test_images/image_1.png) ![image_2](test_images/image_2.png) ![image_3](test_images/image_3.png)
CouchCat/ma_mlc_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "multi-label", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2023-03-02T02:50:47Z
## Pretraining Without Attention (BiGS) <br> ## Official JAX Models with Maximal Sequence Length 128 <br> ### This is the finetuned checkpoint on MNLI (validation accuracy 86.34) ### [Paper](https://arxiv.org/abs/2212.10544) | [![Hugging Face Hub](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Hub-blue)](https://huggingface.co/JunxiongWang) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Fz3OSRF3PZEF_dlnyJ3KZ8Bq35DfUrIB?usp=sharing) <img width="537" alt="BiGS" src="https://user-images.githubusercontent.com/16102460/221464744-06b6538a-7e84-4c95-909f-239eab1dba71.png"> This [repository](https://github.com/jxiw/BiGS) contains BiGS's JAX model definitions, pretrained model weights, and training and finetuning code for our paper exploring the use of state-space models for pretraining. You can find more details in our paper. [**Pretraining Without Attention**](https://arxiv.org/abs/2212.10544)<br> [Junxiong Wang](), [Jing Nathan Yan](), [Albert Gu](), [Alexander M. Rush]() <br>Cornell University, Cornell Tech, DeepMind<br> Transformers have been essential to pretraining success in NLP. While other architectures have been used, downstream accuracy is either significantly worse, or requires attention layers to match standard benchmarks such as GLUE. This work explores pretraining without attention by using recent advances in sequence routing based on state-space models (SSMs). Our proposed model, Bidirectional Gated SSM (BiGS), combines SSM layers with a multiplicative gating architecture that has been effective in simplified sequence modeling architectures. The model learns static layers that do not consider pair-wise interactions. Even so, BiGS is able to match BERT pretraining accuracy on GLUE and can be extended to long-form pretraining of 4096 tokens without approximation. Analysis shows that while the models have similar accuracy, the approach has significantly different inductive biases than BERT in terms of interactions and syntactic representations. ### Load Sequence Classification Model ```python from BiGS.modeling_flax_bigs import FlaxBiGSForSequenceClassification model = FlaxBiGSForSequenceClassification.from_pretrained('JunxiongWang/BiGS_128_MNLI') ``` ### GLUE For MRPC, STS-B and RTE, we finetune on the MNLI model ``` export TASK_NAME=mrpc python run_glue.py \ --model_name_or_path JunxiongWang/BiGS_128_MNLI \ --task_name $TASK_NAME \ --max_seq_length 128 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --per_device_train_batch_size 2 \ --logging_steps 100 \ --eval_steps 500 \ --weight_decay 0.01 \ --output_dir BiGS_$TASK_NAME/ ```

| Task | Metric | Result |
|-------|------------------------------|-------------|
| MRPC | F1/Accuracy | 88.4/83.6 |
| STS-B | Pearson/Spearman corr. | 89.3/89.2 |
| RTE | Accuracy | 80.1 |
Craftified/Bob
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: pryjuli/ppo-Pyramids 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
CrypticT1tan/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python import gym # load_from_hub and evaluate_agent are helper functions defined in the Deep RL course notebook model = load_from_hub(repo_id="CyberKing/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Cryptikdw/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.72 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python import gym # load_from_hub and evaluate_agent are helper functions defined in the Deep RL course notebook model = load_from_hub(repo_id="CyberKing/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Crystal/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-02T04:18:37Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9850746393203735 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### golden retriever ![golden retriever](images/golden_retriever.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
Culmenus/XLMR-ENIS-finetuned-ner
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:mim_gold_ner", "transformers", "generated_from_trainer", "license:agpl-3.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: flan-t5-large-extraction-cnndm_1000-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-large-extraction-cnndm_1000-all This model is a fine-tuned version of [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8091 - Rouge1: 34.4985 - Rouge2: 14.964 - Rougel: 29.6673 - Rougelsum: 29.6632 - Gen Len: 18.978 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 24 - seed: 1799 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 2.1257 | 1.6 | 200 | 1.8318 | 35.0827 | 14.7034 | 29.3871 | 29.3654 | 18.994 | | 1.8202 | 3.2 | 400 | 1.8333 | 34.8523 | 15.0986 | 29.9144 | 29.9472 | 18.986 | | 1.6711 | 4.8 | 600 | 1.8091 | 34.4985 | 14.964 | 29.6673 | 29.6632 | 18.978 | | 1.5597 | 6.4 | 800 | 1.8217 | 34.6517 | 15.2819 | 29.7011 | 29.7576 | 18.988 | | 1.4743 | 8.0 | 1000 | 1.8223 | 34.0466 | 14.7149 | 29.259 | 29.3157 | 18.976 | | 1.3895 | 9.6 | 1200 | 1.8799 | 34.224 | 14.8791 | 29.0879 | 29.0948 | 18.986 | | 1.3446 | 11.2 | 1400 | 1.8773 | 34.4388 | 14.5656 | 29.0993 | 29.0684 | 18.994 | | 1.272 | 12.8 | 1600 | 1.9016 | 34.4097 | 14.6647 | 29.1727 | 29.1556 | 18.984 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.0+cu111 - Datasets 2.5.1 - Tokenizers 0.12.1
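A usage sketch for the finetuned checkpoint described above. The hub namespace below is a placeholder (the card does not state one), and treating the model as a plain summarization pipeline is an assumption based on its extraction-style training.

```python
from transformers import pipeline

# "<namespace>" is hypothetical -- replace with the repository that hosts this checkpoint.
extractor = pipeline("summarization", model="<namespace>/flan-t5-large-extraction-cnndm_1000-all")

article = "..."  # a CNN/DailyMail-style news article
# Gen Len in the table above is ~19 tokens, so keep the output short.
print(extractor(article, max_length=20)[0]["summary_text"])
```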
Culmenus/opus-mt-de-is-finetuned-de-to-is_ancc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.35 +/- 40.31 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
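A minimal sketch standing in for the TODO above. The repo id and zip filename follow the usual `<algo>-<env>` convention and are assumptions; adjust them to the actual files in the repository.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Download the checkpoint from the Hub (repo_id/filename are placeholders).
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate over a few episodes.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```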
Culmenus/opus-mt-de-is-finetuned-de-to-is_ekkicc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1856.09 +/- 108.86 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
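A hedged completion of the TODO above. `AntBulletEnv-v0` is registered by `pybullet_envs`, the repo id and filename are placeholders, and if the agent was trained with `VecNormalize` its saved statistics must be restored too (not shown here).

```python
import gym
import pybullet_envs  # noqa: F401 -- importing registers AntBulletEnv-v0
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
print(evaluate_policy(model, env, n_eval_episodes=10, deterministic=True))
```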
Czapla/Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December 2022 and February 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA: Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citation details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project, by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension; understanding capabilities and limitations of current language models, and developing techniques to improve those; and evaluating and mitigating biases, risks, toxic and harmful content generation, and hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that this will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measures to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. **Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. 
## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following sources of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange [2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th>LLaMA</th> <th colspan=6>Model hyper parameters </th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th> <th>4096</th> <th>32</th> <th>32</th> <th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> </tbody> </table> *Table 1 - Summary of LLaMA Model Hyperparameters* We present our results on nine standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks </th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93</th> </tr> <tr><th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94</th> </tr> <tr><th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92</th> </tr> <tr><th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th></tr> </tbody> </table> *Table 2 - Summary of LLaMA Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that a lower value is better, indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary of bias in our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. 
**Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potentially fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
D3vil/DialoGPT-smaall-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December 2022 and February 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA: Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citation details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project, by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension; understanding capabilities and limitations of current language models, and developing techniques to improve those; and evaluating and mitigating biases, risks, toxic and harmful content generation, and hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that this will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measures to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. **Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. 
## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following sources of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange [2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th>LLaMA</th> <th colspan=6>Model hyper parameters </th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th> <th>4096</th> <th>32</th> <th>32</th> <th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T</th> </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5E-04</th><th>4M</th><th>1.4T</th> </tr> </tbody> </table> *Table 1 - Summary of LLaMA Model Hyperparameters* We present our results on nine standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks </th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93</th> </tr> <tr><th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94</th> </tr> <tr><th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92</th> </tr> <tr><th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th></tr> </tbody> </table> *Table 2 - Summary of LLaMA Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that a lower value is better, indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary of bias in our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. 
**Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potentially fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
D3vil/DialoGPT-smaall-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: AY00/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
D3xter1922/electra-base-discriminator-finetuned-cola
[ "pytorch", "tensorboard", "electra", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image --- # Core ML Converted Model: - This model was converted to [Core ML for use on Apple Silicon devices](https://github.com/apple/ml-stable-diffusion). Conversion instructions can be found [here](https://github.com/godly-devotion/MochiDiffusion/wiki/How-to-convert-ckpt-or-safetensors-files-to-Core-ML).<br> - Provide the model to an app such as Mochi Diffusion [Github](https://github.com/godly-devotion/MochiDiffusion) - [Discord](https://discord.gg/x2kartzxGv) to generate images.<br> - `split_einsum` version is compatible with all compute unit options including Neural Engine.<br> - `original` version is only compatible with CPU & GPU option.<br> - Custom resolution versions are tagged accordingly.<br> - `vae` tagged files have a vae embedded into the model.<br> - Descriptions are posted as-is from original model source. Not all features and/or results may be available in CoreML format.<br> - This model was converted with `vae-encoder` for i2i. # Note: Some models do not have the [unet split into chunks](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). # Clarity: Source(s): [Hugging Face](https://huggingface.co/Schisim/Clarity) - [CivitAI](https://civitai.com/models/5062/clarity) <h1>Clarity 🤠</h1> <img src="https://huggingface.co/Schisim/Clarity/resolve/main/Images/00615-2392482435.png" width=512/> VAE NOT REQUIRED BUT RECOMMENDED Model requires VAE - https://huggingface.co/stabilityai/sd-vae-ft-mse-original/tree/main File Structure for AUTOMATIC1111-webui: |──sd |----|──stable-diffusion-webui |----|----|──models |----|----|----|──VAE |----|----|----|----|──Put your VAE file here Merged Models A list of merged models can be found below in the description of the attached model version. Capabilities NSFW Photography SFW Photography is also possible, see "Trigger Words" below. Photorealistic 3D renders Emphasis on human anatomy Limitations Anything not listed above. This model was created as a baseline for a general-purpose model I'm working on. Stylized images and object images are possible, but require a little finesse to generate. Trigger Words This checkpoint does not contain any trigger words. However, placing some tags at the beginning of the prompts can heavily influence the generation. These tags include: "nsfw", "sfw", "erotica", "nudity", "3d render", and "cartoon" Note: For SFW generation, try adding sfw to your prompt and nsfw to your negative prompt. For NSFW generation, try adding either nsfw, erotica, or nudity to your prompt and sfw to your negative prompt. In general, this is more useful for generating sfw images. This concept also applies to 3d render and cartoon. I recommend leaving 3d render and cartoon both in your negative prompt for generating photographic images. Basic Prompt Guide This model heavily revolves around UnstablePhotorealv.5. This means that you can use the tagging system for PhotoReal, although I would recommend using a combination of the PhotoReal comma system and more natural language prompting. 
Guide to prompting with PhotoReal - https://docs.google.com/document/d/1-DDIHVbsYfynTp_rsKLu4b2tSQgxtO5F6pNsNla12k0/edit#heading=h.3znysh7 Example prompt using commas and natural language: Positive A Professional Full Body Photo, of a beautiful young woman, clothed, standing indoors, Caucasian, toned physique, strawberry red hair, neutral expression Negative I recommend something simple like: deformed, bad anatomy, disfigured, missing limb, floating limbs, twisted, blurry, fused fingers, long neck, words, logo, text, mutated hands, mutated fingers Modify as needed. For example, adding 3d render, cartoon to your negative prompt will help generate photographic images. The prompts for this model are fairly flexible; experiment to find out what works best for you.
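For reference, here is a sketch of the same prompt with the original checkpoint via `diffusers` (the [Hugging Face source](https://huggingface.co/Schisim/Clarity) linked above, not this Core ML conversion); whether that repo ships diffusers-format weights is an assumption.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("Schisim/Clarity", torch_dtype=torch.float16).to("cuda")

# Positive/negative prompts taken from the example above, with the suggested
# sfw / 3d render / cartoon tag placement.
image = pipe(
    prompt="sfw, A Professional Full Body Photo, of a beautiful young woman, clothed, "
           "standing indoors, Caucasian, toned physique, strawberry red hair, neutral expression",
    negative_prompt="nsfw, 3d render, cartoon, deformed, bad anatomy, disfigured, missing limb, "
                    "floating limbs, twisted, blurry, fused fingers, long neck, words, logo, text, "
                    "mutated hands, mutated fingers",
).images[0]
image.save("clarity_example.png")
```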
D3xter1922/electra-base-discriminator-finetuned-mnli
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0-1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 51.80 +/- 30.14 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: openrail++ language: - en library_name: diffusers pipeline_tag: text-to-image inference: false ---
DLNLP/t5-small-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1-3k results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DTAI-KULeuven/mbert-corona-tweets-belgium-topics
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Dutch", "French", "English", "Tweets", "Topic classification" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
167
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 250.37 +/- 21.92 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
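As with the other Stable-Baselines3 cards, here is a hedged stand-in for the missing code, this time showing a short rollout; the repo id and filename are placeholders.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# repo_id/filename are hypothetical -- point them at the actual checkpoint.
model = PPO.load(load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip"))

env = gym.make("LunarLander-v2")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```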
DTAI-KULeuven/robbertje-1-gb-shuffled
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:oscar (NL)", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 728.00 +/- 321.14 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ElementBrawlerAI -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ElementBrawlerAI -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga ElementBrawlerAI ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
alexandrainst/da-subjectivivity-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "dataset:DDSC/twitter-sent", "dataset:DDSC/europarl", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
846
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-pixelcopter-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 52.80 +/- 36.16 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Daltcamalea01/Camaleaodalt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-02T07:09:26Z
from transformers import AutoModelForCausalLM, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("Ahmade/rick-and-morty-v2") model = AutoModelForCausalLM.from_pretrained("Ahmade/rick-and-morty-v2") def chat(model, tokenizer): print("type \"q\" to quit. Automatically quits after 5 messages") for step in range(5): message = input("MESSAGE: ") if message in ["", "q"]: # if the user doesn't wanna talk break # encode the new user input, add the eos_token and return a tensor in PyTorch new_user_input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors='pt') # append the new user input tokens to the chat history bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids # generate a response while limiting the total chat history to 1000 tokens chat_history_ids = model.generate( bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id, no_repeat_ngram_size=3, do_sample=True, top_k=100, top_p=0.7, temperature=0.8, ) # pretty print last output tokens from bot print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))) chat(model, tokenizer)
DamolaMack/Classyfied
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-02T07:12:50Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### natnatcat Dreambooth model trained by trappy with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
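A hypothetical `diffusers` sketch for trying the concept outside the A1111 Colab: the hub id and the `natnatcat` instance token are assumptions based on the concept name; check the repository for the actual values.

```python
import torch
from diffusers import StableDiffusionPipeline

# "<user>/natnatcat" is a placeholder hub id for this Dreambooth checkpoint.
pipe = StableDiffusionPipeline.from_pretrained("<user>/natnatcat", torch_dtype=torch.float16).to("cuda")

# Dreambooth concepts are triggered by their instance token ("natnatcat" assumed here).
image = pipe("a photo of natnatcat sitting on a windowsill").images[0]
image.save("natnatcat.png")
```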
DavidAMcIntosh/DialoGPT-small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Davlan/bert-base-multilingual-cased-finetuned-swahili
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
67
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: jamesup/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Davlan/distilbert-base-multilingual-cased-masakhaner
[ "pytorch", "tf", "distilbert", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- language: - el tags: - text - language-modeling - bert - pretraining - greek-media - domain-adaptation pipeline_tag: fill-mask metrics: - accuracy model-index: - name: greek-media-bert-base-uncased results: [] --- # Greek Media BERT (uncased) This model is a domain-adapted version of [nlpaueb/bert-base-greek-uncased-v1](https://huggingface.co/nlpaueb/bert-base-greek-uncased-v1) on Greek media centric data. ## Model description Details will be updated soon. ## Intended uses & limitations Details will be updated soon. ## Training and evaluation data Details will be updated soon. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results Details will be updated soon. ### Framework versions - Transformers 4.21.0.dev0 - Pytorch 1.12.0+cu116 - Tensorflow 2.11.0 - Datasets 2.3.2 - Tokenizers 0.12.1 ### Citation The model has been officially released with the article "PIMA: Parameter-Shared Intelligent Media Analytics Framework for Low Resource Languages. Dimitrios Zaikis, Nikolaos Stylianou and Ioannis Vlahavas. In the Special Issue: New Techniques of Machine Learning and Deep Learning in Text Classification, Applied Sciences Journal. 2023" (https://www.mdpi.com/2076-3417/13/5/3265). If you use the model, please cite the following: ```bibtex @Article{app13053265, AUTHOR = {Zaikis, Dimitrios and Stylianou, Nikolaos and Vlahavas, Ioannis}, TITLE = {PIMA: Parameter-Shared Intelligent Media Analytics Framework for Low Resource Languages}, JOURNAL = {Applied Sciences}, VOLUME = {13}, YEAR = {2023}, NUMBER = {5}, ARTICLE-NUMBER = {3265}, URL = {https://www.mdpi.com/2076-3417/13/5/3265}, ISSN = {2076-3417}, DOI = {10.3390/app13053265} } ```
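A fill-mask usage sketch; the hub id below is inferred from the model-index name and may differ, and the `[MASK]` convention is inherited from the `nlpaueb/bert-base-greek-uncased-v1` base model.

```python
from transformers import pipeline

# "<namespace>" is hypothetical -- use the repository that hosts this checkpoint.
unmasker = pipeline("fill-mask", model="<namespace>/greek-media-bert-base-uncased")
print(unmasker("Σήμερα είναι μια [MASK] μέρα."))
```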
Davlan/distilbert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "distilbert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
123,856
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.77 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python import gym # load_from_hub and evaluate_agent are helper functions defined in the Deep RL course notebook model = load_from_hub(repo_id="frank1991/Taxi", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Davlan/mbart50-large-yor-eng-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PixelCopter results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 24.60 +/- 9.71 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Davlan/xlm-roberta-base-finetuned-xhosa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 12.18 +/- 4.94 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r mahmoud-mohey/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment (in a standard install this is the VizDoom example entry point): ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- pipeline_tag: image-classification datasets: - imagenet-1k ---
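A minimal usage sketch for an image-classification checkpoint; the model id below is a placeholder to substitute with this repo's actual id:

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="<user>/<model>")  # placeholder id
preds = classifier("path/to/image.jpg")  # also accepts a URL or a PIL.Image
print(preds[:3])  # top predictions as label/score dicts over the imagenet-1k classes
```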
Dawit/DialogGPT-small-ironman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-03-02T10:01:37Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false ---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL Course notebook; it downloads
# and unpickles the saved model dict from the Hub.
model = load_from_hub(repo_id="jorgelzn/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
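Once loaded, acting greedily from the Q-table is a one-liner; a sketch, assuming the pickled dict follows the course convention and stores the learned values under a "qtable" key (an assumption, alongside the "env_id" key used above):

```python
import numpy as np

state = env.reset()  # older gym versions return just the state; gymnasium returns (state, info)
action = int(np.argmax(model["qtable"][state]))  # greedy action for the current state
```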
Dayout/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 2204.74 +/- 82.93 name: mean_reward verified: false ---

# **A2C** Agent playing **AntBulletEnv-v0**

This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch; the repo id and filename below are placeholders to substitute with this checkpoint's actual values:

```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")  # placeholder names
model = A2C.load(checkpoint)
```
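To sanity-check the download, Stable-Baselines3 ships an evaluation helper; a sketch, assuming `pybullet` is installed (importing `pybullet_envs` is what registers `AntBulletEnv-v0` with gym):

```python
import gym
import pybullet_envs  # noqa: F401 -- registers AntBulletEnv-v0
from stable_baselines3.common.evaluation import evaluate_policy

eval_env = gym.make("AntBulletEnv-v0")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```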
Declan/Breitbart_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-03-02T10:48:36Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget library_name: ml-agents ---

# **ppo** Agent playing **SnowballTarget**

This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget
2. Write your model id: Lakshya2k/ppo-SnowballTarget
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
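To fetch the checkpoint locally instead of watching in the browser, a sketch, assuming your ml-agents install ships the Hugging Face helper script:

```
mlagents-load-from-hf --repo-id="Lakshya2k/ppo-SnowballTarget" --local-dir="./downloads"
```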
Declan/Breitbart_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image ---

# Core ML Converted Model

This model was converted to Core ML for use on Apple Silicon devices by following Apple's instructions [here](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml).<br>
Provide the model to an app such as [Mochi Diffusion](https://github.com/godly-devotion/MochiDiffusion) to generate images.

`split_einsum` versions are compatible with all compute units.\
`original` versions are only compatible with CPU & GPU.\
<br>

# Grapefruit

Source: [CivitAI](https://civitai.com/models/2583)

Grapefruit aims to be a hentai model with a bright, clear anime style that leans away from semi-realism. The model also works well for SFW images.

You can support me on [patreon](https://patreon.com/user?u=27247323&utm_medium=clipboard_copy&utm_source=copyLink&utm_campaign=creatorshare_creator&utm_content=join_link), where you can also get my other models. Join my newly created [discord](https://discord.gg/zSR5FcYWWE) for everything related to anime models.

Since 3.2, no extra VAE is needed.

Thanks to my supporters tnpinc, merlinAI and Alessandro on patreon!

_____________________________________________________

Using the model: use mostly [danbooru](https://danbooru.donmai.us/) tags. For better prompting, use this [LINK](https://aituts.com/novelai-anime-prompt-techniques/) or [LINK](https://lunarmimi.net/freebies/novelai-anime-girl-prompt-guide/#1basic). My negative prompts are: worst quality, low quality; add monochrome, signature, text or logo when needed.

<img width="768" src="https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/6a7e2780-49b6-44d0-d6d1-ae0e12403200/width=2048">
<img width="768" src="https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/b88b64c7-b4e8-4959-ecf3-205eb37c5200/width=2048">
<img width="768" src="https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/f34957e0-b6ec-4402-5034-b26e0b4ddb00/width=2048">

_____________________________________________________

The models merged into Grapefruit are [AnythingV3](https://civitai.com/models/66/anything-v3), [ElysiumV2](https://huggingface.co/hesw23168/SD-Elysium-Model/), [AbyssOrangeMix](https://huggingface.co/WarriorMama777/OrangeMixs), a bit of [AbyssOrangeMix2](https://huggingface.co/WarriorMama777/OrangeMixs), [basilMix](https://huggingface.co/nuigurumi/basil_mix), plus my own lemon and gape (derived from anyGape). Lemon is a mix of anyGape and [basilMix](https://huggingface.co/nuigurumi/basil_mix).

Note: [Mage](https://www.mage.space/) has my permission to use my model (I receive no money for it).

Contact: ikenaaigrapefruit@gmail.com
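If you prefer the command line to a GUI app, Apple's `ml-stable-diffusion` package can also run the converted weights directly; a sketch, where the `-i` path is a placeholder for wherever this model's `.mlpackage` directory was downloaded (use `CPU_AND_GPU` for the `original` variant, per the compatibility note above):

```
python -m python_coreml_stable_diffusion.pipeline --prompt "1girl, garden, looking at viewer" -i <path-to-model-mlpackages> -o output --compute-unit CPU_AND_GPU --seed 42
```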
Declan/CNN_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: hBERTv1_wnli results: - task: name: Text Classification type: text-classification dataset: name: GLUE WNLI type: glue config: wnli split: validation args: wnli metrics: - name: Accuracy type: accuracy value: 0.5633802816901409 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_wnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE WNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6877 - Accuracy: 0.5634 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7359 | 1.0 | 3 | 0.7194 | 0.4366 | | 0.6989 | 2.0 | 6 | 0.6899 | 0.5634 | | 0.7031 | 3.0 | 9 | 0.7028 | 0.4366 | | 0.7012 | 4.0 | 12 | 0.6889 | 0.5634 | | 0.697 | 5.0 | 15 | 0.6894 | 0.5634 | | 0.6971 | 6.0 | 18 | 0.7015 | 0.4366 | | 0.7 | 7.0 | 21 | 0.6882 | 0.5634 | | 0.6928 | 8.0 | 24 | 0.6890 | 0.5634 | | 0.6932 | 9.0 | 27 | 0.6897 | 0.5634 | | 0.6954 | 10.0 | 30 | 0.6956 | 0.4366 | | 0.6962 | 11.0 | 33 | 0.6913 | 0.5634 | | 0.6956 | 12.0 | 36 | 0.6877 | 0.5634 | | 0.6973 | 13.0 | 39 | 0.6926 | 0.5070 | | 0.6978 | 14.0 | 42 | 0.6933 | 0.4930 | | 0.6945 | 15.0 | 45 | 0.6883 | 0.5634 | | 0.6974 | 16.0 | 48 | 0.6881 | 0.5634 | | 0.6936 | 17.0 | 51 | 0.6925 | 0.5211 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.14.0a0+410ce96 - Datasets 2.10.1 - Tokenizers 0.13.2
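For quick inference with the fine-tuned checkpoint, a sketch: the repo id `gokuls/hBERTv1_wnli` is an assumption inferred from the card's model-index name, and we assume the checkpoint loads with the standard Auto classes; adjust both if they don't match the actual repo.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("gokuls/hBERTv1_wnli")
model = AutoModelForSequenceClassification.from_pretrained("gokuls/hBERTv1_wnli")

# WNLI is a sentence-pair (entailment) task, so encode the two sentences together.
inputs = tokenizer("The trophy doesn't fit in the suitcase.", "The trophy is too large.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 0 or 1, the predicted WNLI label
```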
Declan/ChicagoTribune_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-03-02T11:32:08Z
--- language: en inference: false tags: - onnx - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia ---

# ONNX export of distilbert-base-uncased

This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-uncased). It was introduced in [this paper](https://arxiv.org/abs/1910.01108). The code for the distillation process can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation). This model is uncased: it does not make a difference between english and English.

## Model description

DistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a self-supervised fashion, using the BERT base model as a teacher. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and labels from those texts using the BERT base model. More precisely, it was pretrained with three objectives:

- Distillation loss: the model was trained to return the same probabilities as the BERT base model.
- Masked language modeling (MLM): this is part of the original training loss of the BERT base model. When taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence.
- Cosine embedding loss: the model was also trained to generate hidden states as close as possible to those of the BERT base model.

This way, the model learns the same inner representation of the English language as its teacher model, while being faster for inference or downstream tasks.

## Intended uses & limitations

You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=distilbert) to look for fine-tuned versions on a task that interests you.

Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at models like GPT2.

### How to use

You can use this model directly with a pipeline for masked language modeling:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='distilbert-base-uncased')
>>> unmasker("Hello I'm a [MASK] model.")

[{'sequence': "[CLS] hello i'm a role model. [SEP]",
  'score': 0.05292855575680733,
  'token': 2535,
  'token_str': 'role'},
 {'sequence': "[CLS] hello i'm a fashion model. [SEP]",
  'score': 0.03968575969338417,
  'token': 4827,
  'token_str': 'fashion'},
 {'sequence': "[CLS] hello i'm a business model. [SEP]",
  'score': 0.034743521362543106,
  'token': 2449,
  'token_str': 'business'},
 {'sequence': "[CLS] hello i'm a model model. [SEP]",
  'score': 0.03462274372577667,
  'token': 2944,
  'token_str': 'model'},
 {'sequence': "[CLS] hello i'm a modeling model. [SEP]",
  'score': 0.018145186826586723,
  'token': 11643,
  'token_str': 'modeling'}]
```

Here is how to use this model to get the features of a given text in PyTorch:

```python
from transformers import DistilBertTokenizer, DistilBertModel
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```

and in TensorFlow:

```python
from transformers import DistilBertTokenizer, TFDistilBertModel
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```

### Limitations and bias

Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions. It also inherits some of [the bias of its teacher model](https://huggingface.co/bert-base-uncased#limitations-and-bias).

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='distilbert-base-uncased')
>>> unmasker("The White man worked as a [MASK].")

[{'sequence': '[CLS] the white man worked as a blacksmith. [SEP]',
  'score': 0.1235365942120552,
  'token': 20987,
  'token_str': 'blacksmith'},
 {'sequence': '[CLS] the white man worked as a carpenter. [SEP]',
  'score': 0.10142576694488525,
  'token': 10533,
  'token_str': 'carpenter'},
 {'sequence': '[CLS] the white man worked as a farmer. [SEP]',
  'score': 0.04985016956925392,
  'token': 7500,
  'token_str': 'farmer'},
 {'sequence': '[CLS] the white man worked as a miner. [SEP]',
  'score': 0.03932540491223335,
  'token': 18594,
  'token_str': 'miner'},
 {'sequence': '[CLS] the white man worked as a butcher. [SEP]',
  'score': 0.03351764753460884,
  'token': 14998,
  'token_str': 'butcher'}]

>>> unmasker("The Black woman worked as a [MASK].")

[{'sequence': '[CLS] the black woman worked as a waitress. [SEP]',
  'score': 0.13283951580524445,
  'token': 13877,
  'token_str': 'waitress'},
 {'sequence': '[CLS] the black woman worked as a nurse. [SEP]',
  'score': 0.12586183845996857,
  'token': 6821,
  'token_str': 'nurse'},
 {'sequence': '[CLS] the black woman worked as a maid. [SEP]',
  'score': 0.11708822101354599,
  'token': 10850,
  'token_str': 'maid'},
 {'sequence': '[CLS] the black woman worked as a prostitute. [SEP]',
  'score': 0.11499975621700287,
  'token': 19215,
  'token_str': 'prostitute'},
 {'sequence': '[CLS] the black woman worked as a housekeeper. [SEP]',
  'score': 0.04722772538661957,
  'token': 22583,
  'token_str': 'housekeeper'}]
```

This bias will also affect all fine-tuned versions of this model.

## Training data

DistilBERT was pretrained on the same data as BERT, which is [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books, and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers).

## Training procedure

### Preprocessing

The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form:

```
[CLS] Sentence A [SEP] Sentence B [SEP]
```

With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus; in the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constraint is that the result with the two "sentences" has a combined length of less than 512 tokens.

The details of the masking procedure for each sentence are the following:
- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `[MASK]`.
- In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace.
- In the 10% remaining cases, the masked tokens are left as is.

### Pretraining

The model was trained on 8 16 GB V100 GPUs for 90 hours. See the [training code](https://github.com/huggingface/transformers/tree/master/examples/distillation) for all hyperparameter details.

## Evaluation results

When fine-tuned on downstream tasks, this model achieves the following results:

Glue test results:

| Task | MNLI | QQP  | QNLI | SST-2 | CoLA | STS-B | MRPC | RTE  |
|:----:|:----:|:----:|:----:|:-----:|:----:|:-----:|:----:|:----:|
|      | 82.2 | 88.5 | 89.2 | 91.3  | 51.3 | 85.8  | 87.5 | 59.9 |

### BibTeX entry and citation info

```bibtex
@article{Sanh2019DistilBERTAD,
  title={DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter},
  author={Victor Sanh and Lysandre Debut and Julien Chaumond and Thomas Wolf},
  journal={ArXiv},
  year={2019},
  volume={abs/1910.01108}
}
```

<a href="https://huggingface.co/exbert/?model=distilbert-base-uncased">
	<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
Declan/ChicagoTribune_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents ---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model id: arenbeglaryan/PyramidsRND
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
Declan/FoxNews_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1352.85 +/- 32.73 name: mean_reward verified: false ---

# **A2C** Agent playing **AntBulletEnv-v0**

This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal sketch for loading the checkpoint; the repo id and filename are placeholders to substitute with this repo's actual values:

```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")  # placeholder names
model = A2C.load(checkpoint)
```