Column schema:
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
ChukSamuels/DialoGPT-small-Dr.FauciBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1127.86 +/- 338.12 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
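A minimal sketch of what the missing usage snippet could look like, assuming the trained agent is stored in this repository as a stable-baselines3 .zip checkpoint; the repo id and filename below are placeholders, not taken from the card (evaluating the agent would additionally require the AntBulletEnv-v0 environment from pybullet_envs):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename -- substitute the actual values for this model.
checkpoint = load_from_hub(
    repo_id="<user>/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)
```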
Chun/DialoGPT-large-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
# Model summary Train flan-T5-large on the alpaca dataset with LoRA # training * torch==2.0.0+cu117 * transformers==4.28.0.dev0 * 8 x V100 32G # How to use ```python import transformers from peft import PeftModel tokenizer = transformers.AutoTokenizer.from_pretrained("google/flan-t5-large") base_model = transformers.AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large") peft_model = PeftModel.from_pretrained(base_model, "zirui3/flan-t5-large-alpaca") inputs = tokenizer("Any instruction that you like.", return_tensors="pt") outputs = peft_model.generate(**inputs, max_length=128, do_sample=True) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ```
Chun/DialoGPT-small-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - ru library_name: transformers tags: - chatml ---
Chun/w-en2zh-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: detr_algae_0.25r_v0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # detr_algae_0.25r_v0 This model is a fine-tuned version of [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1346 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 200 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 5.1721 | 1.0 | 69 | 3.3857 | | 3.3617 | 2.0 | 138 | 2.9312 | | 3.0175 | 3.0 | 207 | 2.6671 | | 2.9799 | 4.0 | 276 | 2.7044 | | 2.835 | 5.0 | 345 | 2.7200 | | 2.7184 | 6.0 | 414 | 2.4760 | | 2.7039 | 7.0 | 483 | 2.5395 | | 2.6316 | 8.0 | 552 | 2.4040 | | 2.683 | 9.0 | 621 | 2.4622 | | 2.6908 | 10.0 | 690 | 2.5163 | | 2.5706 | 11.0 | 759 | 2.2428 | | 2.5066 | 12.0 | 828 | 2.3053 | | 2.5146 | 13.0 | 897 | 2.2936 | | 2.446 | 14.0 | 966 | 2.1469 | | 2.3444 | 15.0 | 1035 | 2.2260 | | 2.3997 | 16.0 | 1104 | 2.2093 | | 2.3823 | 17.0 | 1173 | 2.2598 | | 2.3741 | 18.0 | 1242 | 2.1737 | | 2.3437 | 19.0 | 1311 | 2.1670 | | 2.3134 | 20.0 | 1380 | 2.2041 | | 2.3636 | 21.0 | 1449 | 2.1305 | | 2.2936 | 22.0 | 1518 | 2.0564 | | 2.2117 | 23.0 | 1587 | 2.0024 | | 2.1955 | 24.0 | 1656 | 2.0013 | | 2.1724 | 25.0 | 1725 | 1.9785 | | 2.1765 | 26.0 | 1794 | 2.0120 | | 2.1489 | 27.0 | 1863 | 1.9891 | | 2.157 | 28.0 | 1932 | 1.9267 | | 2.1454 | 29.0 | 2001 | 1.9686 | | 2.1545 | 30.0 | 2070 | 2.0207 | | 2.0847 | 31.0 | 2139 | 1.8773 | | 2.0731 | 32.0 | 2208 | 1.8358 | | 2.0717 | 33.0 | 2277 | 1.9076 | | 2.0616 | 34.0 | 2346 | 1.9287 | | 2.1044 | 35.0 | 2415 | 1.8625 | | 2.04 | 36.0 | 2484 | 1.8683 | | 2.0489 | 37.0 | 2553 | 1.8393 | | 2.0562 | 38.0 | 2622 | 1.9013 | | 2.0433 | 39.0 | 2691 | 1.8472 | | 2.0093 | 40.0 | 2760 | 1.8122 | | 2.0149 | 41.0 | 2829 | 1.7668 | | 2.049 | 42.0 | 2898 | 1.8419 | | 1.9992 | 43.0 | 2967 | 1.8292 | | 1.9494 | 44.0 | 3036 | 1.8329 | | 2.0128 | 45.0 | 3105 | 1.8827 | | 2.0712 | 46.0 | 3174 | 1.8425 | | 1.9346 | 47.0 | 3243 | 1.8509 | | 1.899 | 48.0 | 3312 | 1.7352 | | 1.9576 | 49.0 | 3381 | 1.7825 | | 1.9877 | 50.0 | 3450 | 1.7996 | | 1.9176 | 51.0 | 3519 | 1.7754 | | 1.9217 | 52.0 | 3588 | 1.7418 | | 1.9365 | 53.0 | 3657 | 1.7711 | | 1.9032 | 54.0 | 3726 | 1.7001 | | 1.8404 | 55.0 | 3795 | 1.6628 | | 1.8447 | 56.0 | 3864 | 1.6939 | | 1.8418 | 57.0 | 3933 | 1.7099 | | 1.7911 | 58.0 | 4002 | 1.6751 | | 1.7899 | 59.0 | 4071 | 1.7471 | | 1.8368 | 60.0 | 4140 | 1.7111 | | 1.853 | 61.0 | 4209 | 1.7785 | | 1.88 | 62.0 | 4278 | 1.7709 | | 1.8734 | 63.0 | 4347 | 1.6597 | | 1.8107 | 64.0 | 4416 | 1.6720 | | 1.8329 | 65.0 | 4485 | 1.6868 | | 1.8129 | 66.0 | 4554 | 1.6611 | | 1.7972 | 67.0 | 4623 | 1.6452 | | 1.7828 | 68.0 | 4692 | 1.6538 | | 1.7653 | 69.0 | 4761 | 1.6246 | | 1.7343 | 70.0 | 4830 | 1.5364 | | 1.6567 | 71.0 | 4899 | 1.5308 | | 
1.6873 | 72.0 | 4968 | 1.5473 | | 1.7233 | 73.0 | 5037 | 1.6096 | | 1.6934 | 74.0 | 5106 | 1.5679 | | 1.7263 | 75.0 | 5175 | 1.6542 | | 1.7109 | 76.0 | 5244 | 1.5674 | | 1.6977 | 77.0 | 5313 | 1.5367 | | 1.6761 | 78.0 | 5382 | 1.5456 | | 1.69 | 79.0 | 5451 | 1.5624 | | 1.7241 | 80.0 | 5520 | 1.5067 | | 1.643 | 81.0 | 5589 | 1.5723 | | 1.6358 | 82.0 | 5658 | 1.5349 | | 1.6511 | 83.0 | 5727 | 1.5321 | | 1.6932 | 84.0 | 5796 | 1.5640 | | 1.7214 | 85.0 | 5865 | 1.5118 | | 1.6988 | 86.0 | 5934 | 1.5471 | | 1.6697 | 87.0 | 6003 | 1.5650 | | 1.6828 | 88.0 | 6072 | 1.5087 | | 1.7211 | 89.0 | 6141 | 1.5302 | | 1.6195 | 90.0 | 6210 | 1.5018 | | 1.5924 | 91.0 | 6279 | 1.4886 | | 1.5746 | 92.0 | 6348 | 1.4365 | | 1.6277 | 93.0 | 6417 | 1.4995 | | 1.5936 | 94.0 | 6486 | 1.4569 | | 1.6132 | 95.0 | 6555 | 1.4982 | | 1.5637 | 96.0 | 6624 | 1.4032 | | 1.5502 | 97.0 | 6693 | 1.4388 | | 1.5535 | 98.0 | 6762 | 1.4101 | | 1.5306 | 99.0 | 6831 | 1.4048 | | 1.5425 | 100.0 | 6900 | 1.4133 | | 1.529 | 101.0 | 6969 | 1.4244 | | 1.5659 | 102.0 | 7038 | 1.4268 | | 1.5234 | 103.0 | 7107 | 1.3829 | | 1.498 | 104.0 | 7176 | 1.3884 | | 1.4838 | 105.0 | 7245 | 1.3627 | | 1.4774 | 106.0 | 7314 | 1.3501 | | 1.479 | 107.0 | 7383 | 1.3738 | | 1.475 | 108.0 | 7452 | 1.3537 | | 1.4592 | 109.0 | 7521 | 1.3621 | | 1.5015 | 110.0 | 7590 | 1.4022 | | 1.4948 | 111.0 | 7659 | 1.4069 | | 1.4875 | 112.0 | 7728 | 1.3325 | | 1.437 | 113.0 | 7797 | 1.3080 | | 1.4276 | 114.0 | 7866 | 1.3100 | | 1.4489 | 115.0 | 7935 | 1.3712 | | 1.4951 | 116.0 | 8004 | 1.4256 | | 1.4585 | 117.0 | 8073 | 1.3720 | | 1.4736 | 118.0 | 8142 | 1.4397 | | 1.4664 | 119.0 | 8211 | 1.4036 | | 1.4569 | 120.0 | 8280 | 1.3672 | | 1.4627 | 121.0 | 8349 | 1.3809 | | 1.4924 | 122.0 | 8418 | 1.3420 | | 1.4487 | 123.0 | 8487 | 1.3142 | | 1.4341 | 124.0 | 8556 | 1.3238 | | 1.4025 | 125.0 | 8625 | 1.2891 | | 1.4013 | 126.0 | 8694 | 1.3140 | | 1.3909 | 127.0 | 8763 | 1.3329 | | 1.4305 | 128.0 | 8832 | 1.3489 | | 1.3771 | 129.0 | 8901 | 1.3640 | | 1.4442 | 130.0 | 8970 | 1.3695 | | 1.4272 | 131.0 | 9039 | 1.3752 | | 1.4087 | 132.0 | 9108 | 1.3145 | | 1.3648 | 133.0 | 9177 | 1.3222 | | 1.3981 | 134.0 | 9246 | 1.3483 | | 1.4116 | 135.0 | 9315 | 1.2986 | | 1.4117 | 136.0 | 9384 | 1.3789 | | 1.4416 | 137.0 | 9453 | 1.3491 | | 1.3753 | 138.0 | 9522 | 1.3026 | | 1.3721 | 139.0 | 9591 | 1.3292 | | 1.3951 | 140.0 | 9660 | 1.2946 | | 1.3406 | 141.0 | 9729 | 1.2646 | | 1.3336 | 142.0 | 9798 | 1.3247 | | 1.3182 | 143.0 | 9867 | 1.2960 | | 1.3293 | 144.0 | 9936 | 1.2845 | | 1.3242 | 145.0 | 10005 | 1.2849 | | 1.3171 | 146.0 | 10074 | 1.2662 | | 1.3193 | 147.0 | 10143 | 1.2827 | | 1.3298 | 148.0 | 10212 | 1.2776 | | 1.3014 | 149.0 | 10281 | 1.2603 | | 1.3419 | 150.0 | 10350 | 1.2484 | | 1.3385 | 151.0 | 10419 | 1.2477 | | 1.3029 | 152.0 | 10488 | 1.2408 | | 1.2803 | 153.0 | 10557 | 1.2191 | | 1.2562 | 154.0 | 10626 | 1.2264 | | 1.2667 | 155.0 | 10695 | 1.2250 | | 1.2669 | 156.0 | 10764 | 1.2095 | | 1.2643 | 157.0 | 10833 | 1.1964 | | 1.2548 | 158.0 | 10902 | 1.2019 | | 1.262 | 159.0 | 10971 | 1.2393 | | 1.2596 | 160.0 | 11040 | 1.2018 | | 1.2476 | 161.0 | 11109 | 1.2308 | | 1.2755 | 162.0 | 11178 | 1.2103 | | 1.237 | 163.0 | 11247 | 1.2020 | | 1.226 | 164.0 | 11316 | 1.2173 | | 1.2278 | 165.0 | 11385 | 1.1912 | | 1.244 | 166.0 | 11454 | 1.2036 | | 1.2467 | 167.0 | 11523 | 1.1831 | | 1.2063 | 168.0 | 11592 | 1.1720 | | 1.2141 | 169.0 | 11661 | 1.1580 | | 1.224 | 170.0 | 11730 | 1.1897 | | 1.2171 | 171.0 | 11799 | 1.1546 | | 1.2151 | 172.0 | 11868 | 1.1621 | | 1.1623 | 173.0 | 11937 | 1.1899 
| | 1.2037 | 174.0 | 12006 | 1.1649 | | 1.1741 | 175.0 | 12075 | 1.1794 | | 1.1921 | 176.0 | 12144 | 1.1584 | | 1.1811 | 177.0 | 12213 | 1.1589 | | 1.1956 | 178.0 | 12282 | 1.1555 | | 1.1703 | 179.0 | 12351 | 1.1510 | | 1.1727 | 180.0 | 12420 | 1.1363 | | 1.1747 | 181.0 | 12489 | 1.1570 | | 1.1524 | 182.0 | 12558 | 1.1655 | | 1.1645 | 183.0 | 12627 | 1.1324 | | 1.1549 | 184.0 | 12696 | 1.1529 | | 1.1432 | 185.0 | 12765 | 1.1396 | | 1.1552 | 186.0 | 12834 | 1.1406 | | 1.1568 | 187.0 | 12903 | 1.1585 | | 1.1407 | 188.0 | 12972 | 1.1417 | | 1.1419 | 189.0 | 13041 | 1.1542 | | 1.1451 | 190.0 | 13110 | 1.1330 | | 1.1421 | 191.0 | 13179 | 1.1309 | | 1.1283 | 192.0 | 13248 | 1.1271 | | 1.1528 | 193.0 | 13317 | 1.1195 | | 1.1367 | 194.0 | 13386 | 1.1300 | | 1.1407 | 195.0 | 13455 | 1.1144 | | 1.1456 | 196.0 | 13524 | 1.1584 | | 1.1072 | 197.0 | 13593 | 1.1334 | | 1.1081 | 198.0 | 13662 | 1.1378 | | 1.1205 | 199.0 | 13731 | 1.1327 | | 1.1275 | 200.0 | 13800 | 1.1346 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
Chungu424/DATA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m language: - en pipeline_tag: text2text-generation ---
Cilan/dalle-knockoff
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -127.66 +/- 43.37 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'DoctorRobotnik/ppo-CartPole-v1' 'batch_size': 512 'minibatch_size': 128} ```
Cloudy/DialoGPT-CJ-large
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg): `vocabtrimmer/mt5-small-squad-qg-trimmed-en-60000` This model is a trimmed version of [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qg | vocabtrimmer/mt5-small-squad-qg-trimmed-en-60000 | |:---------------------------|:--------------------------|:---------------------------------------------------| | parameter_size_full | 300,165,504 | 105,504,128 | | parameter_size_embedding | 256,103,424 | 61,442,048 | | vocab_size | 250,101 | 60,002 | | compression_rate_full | 100.0 | 35.15 | | compression_rate_embedding | 100.0 | 23.99 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 60000 | 2 |
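The card documents only the trimming process; a minimal usage sketch may help, assuming the trimmed checkpoint loads like any other Hub seq2seq model and that the input format (a "generate question:" prefix with the answer span wrapped in <hl> tokens) follows the parent model lmqg/mt5-small-squad-qg:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "vocabtrimmer/mt5-small-squad-qg-trimmed-en-60000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Prompt format assumed from the parent lmqg question-generation model.
text = "generate question: <hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```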
CoachCarter/distilbert-base-uncased
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: mariav/helsinki-opus-de-en-fine-tuned-wmt16 results: [] datasets: - wmt16 language: - de - en metrics: - bleu pipeline_tag: translation --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # mariav/helsinki-opus-de-en-fine-tuned-wmt16 This model is a fine-tuned version of [Helsinki-NLP/opus-mt-de-en](https://huggingface.co/Helsinki-NLP/opus-mt-de-en) on the wmt16. It achieves the following results on the evaluation set: - Train Loss: 1.0077 - Validation Loss: 1.4381 - Epoch: 4 ## Model description This model is a fine-tuned version of Helsinki-NLP/opus-mt-de-en with the dataset wmt16 for the pair of languages german-english. A tutorial for this task is available in the files. ## Intended uses & limitations Limitations: scholar use. ## Training and evaluation data Training done with keras from Transformers. Evaluation with Bleu score. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 1245, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 1.5115 | 1.4061 | 0 | | 1.2931 | 1.4111 | 1 | | 1.1590 | 1.4200 | 2 | | 1.0644 | 1.4324 | 3 | | 1.0077 | 1.4381 | 4 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.2
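The card lists training details but no inference snippet; a minimal sketch using the TensorFlow classes the model was trained with, assuming the repository id matches the model-index name mariav/helsinki-opus-de-en-fine-tuned-wmt16:

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

# Repo id assumed from the model-index name in the card.
model_id = "mariav/helsinki-opus-de-en-fine-tuned-wmt16"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_id)

# German-to-English translation, the pair the card says the model was fine-tuned for.
inputs = tokenizer("Das Wetter ist heute schön.", return_tensors="tf")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```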
CodeNinja1126/koelectra-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa): `vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-10000` This model is a trimmed version of [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-itquad-qa | vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-10000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 365,068,288 | | parameter_size_embedding | 512,057,344 | 20,488,192 | | vocab_size | 250,028 | 10,004 | | compression_rate_full | 100.0 | 59.76 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 10000 | 2 |
Venkatakrishnan-Ramesh/Text_gen
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T10:23:25Z
--- license: mit library_name: sklearn tags: - sklearn - skops - tabular-classification model_format: skops model_file: examplej.skops widget: structuredData: 'Unnamed: 32': - .nan - .nan - .nan area_mean: - 481.9 - 1130.0 - 748.9 area_se: - 30.29 - 96.05 - 48.31 area_worst: - 677.9 - 1866.0 - 1156.0 compactness_mean: - 0.1058 - 0.1029 - 0.1223 compactness_se: - 0.01911 - 0.01652 - 0.01484 compactness_worst: - 0.2378 - 0.2336 - 0.2394 concave points_mean: - 0.03821 - 0.07951 - 0.08087 concave points_se: - 0.01037 - 0.0137 - 0.01093 concave points_worst: - 0.1015 - 0.1789 - 0.1514 concavity_mean: - 0.08005 - 0.108 - 0.1466 concavity_se: - 0.02701 - 0.02269 - 0.02813 concavity_worst: - 0.2671 - 0.2687 - 0.3791 fractal_dimension_mean: - 0.06373 - 0.05461 - 0.05796 fractal_dimension_se: - 0.003586 - 0.001698 - 0.002461 fractal_dimension_worst: - 0.0875 - 0.06589 - 0.08019 id: - 87930 - 859575 - 8670 perimeter_mean: - 81.09 - 123.6 - 101.7 perimeter_se: - 2.497 - 5.486 - 3.094 perimeter_worst: - 96.05 - 165.9 - 124.9 radius_mean: - 12.47 - 18.94 - 15.46 radius_se: - 0.3961 - 0.7888 - 0.4743 radius_worst: - 14.97 - 24.86 - 19.26 smoothness_mean: - 0.09965 - 0.09009 - 0.1092 smoothness_se: - 0.006953 - 0.004444 - 0.00624 smoothness_worst: - 0.1426 - 0.1193 - 0.1546 symmetry_mean: - 0.1925 - 0.1582 - 0.1931 symmetry_se: - 0.01782 - 0.01386 - 0.01397 symmetry_worst: - 0.3014 - 0.2551 - 0.2837 texture_mean: - 18.6 - 21.31 - 19.48 texture_se: - 1.044 - 0.7975 - 0.7859 texture_worst: - 24.64 - 26.58 - 26.0 --- # Model description [More Information Needed] ## Intended uses & limitations This model is not ready to be used in production (J). ## Training Procedure ### Hyperparameters The model is trained with below hyperparameters. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |------------------------------|-----------------------------------------------------------------------------------------------| | memory | | | steps | [('imputer', SimpleImputer()), ('scaler', StandardScaler()), ('model', LogisticRegression())] | | verbose | False | | imputer | SimpleImputer() | | scaler | StandardScaler() | | model | LogisticRegression() | | imputer__add_indicator | False | | imputer__copy | True | | imputer__fill_value | | | imputer__keep_empty_features | False | | imputer__missing_values | nan | | imputer__strategy | mean | | imputer__verbose | deprecated | | scaler__copy | True | | scaler__with_mean | True | | scaler__with_std | True | | model__C | 1.0 | | model__class_weight | | | model__dual | False | | model__fit_intercept | True | | model__intercept_scaling | 1 | | model__l1_ratio | | | model__max_iter | 100 | | model__multi_class | auto | | model__n_jobs | | | model__penalty | l2 | | model__random_state | | | model__solver | lbfgs | | model__tol | 0.0001 | | model__verbose | 0 | | model__warm_start | False | </details> ### Model Plot The model plot is below. 
[Model plot omitted: an interactive scikit-learn HTML diagram of Pipeline(steps=[('imputer', SimpleImputer()), ('scaler', StandardScaler()), ('model', LogisticRegression())]).] ## Evaluation Results [More Information Needed] # How to Get Started with the Model [More Information Needed] # Model Card Authors This model card is written by following authors: [More Information Needed] # Model Card Contact You can contact the model card authors through following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` [More Information Needed] ```
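The pipeline described by the hyperparameter table above can be reconstructed directly in scikit-learn; a minimal sketch of the equivalent construction (this is not the persisted model itself, which the card says ships as examplej.skops):

```python
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Mirrors the steps in the hyperparameter table: imputer -> scaler -> model,
# all with their default settings.
pipe = Pipeline(steps=[
    ("imputer", SimpleImputer()),
    ("scaler", StandardScaler()),
    ("model", LogisticRegression()),
])
```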
CoffeeAddict93/gpt1-modest-proposal
[ "pytorch", "openai-gpt", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "OpenAIGPTLMHeadModel" ], "model_type": "openai-gpt", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-03-30T10:25:44Z
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Find your model_id: gnonguichi/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CoffeeAddict93/gpt2-medium-call-of-the-wild
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg): `vocabtrimmer/mt5-small-squad-qg-trimmed-en-90000` This model is a trimmed version of [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qg | vocabtrimmer/mt5-small-squad-qg-trimmed-en-90000 | |:---------------------------|:--------------------------|:---------------------------------------------------| | parameter_size_full | 300,165,504 | 136,224,128 | | parameter_size_embedding | 256,103,424 | 92,162,048 | | vocab_size | 250,101 | 90,002 | | compression_rate_full | 100.0 | 45.38 | | compression_rate_embedding | 100.0 | 35.99 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 90000 | 2 |
Coldestadam/Breakout_Mentors_SpongeBob_Model
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 62.22 +/- 38.23 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 1000000 'learning_rate': 0.0006 'num_envs': 64 'num_steps': 1024 'anneal_lr': True 'gae': True 'gamma': 0.98 'gae_lambda': 0.98 'num_minibatches': 64 'update_epochs': 64 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'yumingyi/lunarlander-v2-unit8-2' 'batch_size': 65536 'minibatch_size': 1024} ```
ComCom/gpt2-medium
[ "pytorch", "gpt2", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa): `vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-15000` This model is a trimmed version of [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-itquad-qa | vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-15000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 370,188,288 | | parameter_size_embedding | 512,057,344 | 30,728,192 | | vocab_size | 250,028 | 15,004 | | compression_rate_full | 100.0 | 60.6 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 15000 | 2 |
ComCom/gpt2
[ "pytorch", "gpt2", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image datasets: embed/EasyNegative --- Civitai: https://civitai.com/models/26622 ## Descriptions This is a fine-tuned variant derived from [Animix](https://huggingface.co/OedoSoldier/animix), trained with selected beautiful anime images. It gives you more delicate anime-like illustrations and a lesser AI feeling. This image shows a comparison between three of my mix models: [Aniflatmix](https://huggingface.co/OedoSoldier/aniflatmix), [Animix](https://huggingface.co/OedoSoldier/animix), and Ambientmix (this model). ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/24.png) ## Recommend settings: - VAE: Orangemix / NAI - Sampler: DPM++ 2M Karras - Sampling steps: 20 - Negative embedding: [EasyNegative](https://civitai.com/models/7808)、[badhandv4](https://civitai.com/models/16993/badhandv4-animeillustdiffusion) Highres fix is also recommended. ## Samples ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/19.png) ``` masterpiece, best quality, 1girl, best quality, ultra high res, 1girl, hatsune miku, full body, scenery, smile, ocean, sunset, city, barefoot, footprints, sand, white dress, from back, looking at viewer, looking back, silhouette, wading, standing on liquid Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 703183065, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/6.png) ``` (from below dutch angle:1.2),sunflower field scenery, blue sky,rainbow,water drop, cutie girl wearing simple white dress,straw hat on head, floating hair,wind,smile, (depth of field:1.3),(lens flare:1.1), Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2057351628, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/20.png) ``` masterpiece, best quality, 1girl, solo, light smile, mountain, lake, meadow, panorama, jacket, kneehighs, boots Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2614481802, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/21.png) ``` masterpiece, best quality, 1boy, flat color, limited palette, low contrast, (ligne claire), long straight black hair, looking away, standing. 
smoke, night sky, city, sunset, sky scrapers, bridge, depth of field, black, red, orange, brown, autumn, haze Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 4271450578, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/23.png) ``` amane kanata, 1girl, angel wings, star halo, solo, angel, feathered wings, wings, virtual youtuber, polearm, gloves, thighhighs, blue thighhighs, multicolored hair, weapon, halo, holding, smile, blue hair, purple eyes, castle, white wings, holding polearm, skirt, feathers, blue skirt, streaked hair, short hair, holding weapon, spear, grey hair, open mouth, bangs, colored inner hair, black gloves, looking at viewer, :d, hair ornament, frills, pink hair, outdoors, pleated skirt, sky, hair over one eye, cloud, single hair intake, shirt, day, tower, frilled skirt, (masterpiece,best quality) Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1135798785, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/7.png) ``` (masterpiece:1.2), best quality, spring \(season\) illustration, beautiful detailed elf girl face, (cherryblossom patels:1.2), atmospheric perspective, pose on circle bass, straight-on, huge cherryblossom park, tree of (cherryblossom:1.3), looking at another, pointng at away, large breasts, school uniform, pointy ears, deformed, aged down, platinum blonde hair, twintails, elf, Snow White, tone shading, glowing sunlight Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2278280509, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/2.png) ``` 1girl,young,japanese idol,stare at,medium hair,wavy hair,brown hair,(policewoman),police hat,police jacket,blue jaket,black belt,mini skirt,blue skirt,(handcuffs),spread legs, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3849220177, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/3.png) ``` (masterpiece)+,(best quality)+,(ultra detailed)+ vector art,vivid tone,thick outline,limited palette,1girl soro, cowboy shot, looking at viewer, dutch angle, blue hair, short hair, angel, (Angel wings on the back)++, morning, grass field, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 367964160, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/4.png) ``` (masterpiece, best quality:1.2), (extremely detailed CG unity 8k wallpaper:1.2), (illustration:1.2), (3d:1.2), (realistic body:1.1), ((awesome detailed)), [hyper detailed], [ultra detailed skin], ((many layers)), beautiful hair, beautiful 
eyes, (glistening shining skin:1.4), intense shadow, (official art:1.0), (sketch:1.0), masterpiece, best quality, illustration, 10000000k, ((best quality, illustration)), solo (cool beauty:1.3) (kawaii:1.3) (girl:1.1)(standing fighting stance arm up holding giant sword long sword in hand:1.3) (upturned sharp:1.3) eyes (glaring at viewer:1.2) (upper body:1.3) (long silver hair:1.3) (medium breasts) (silver shoulder armor gauntlets:1.2) (detailed black victorian dress black dragon horns:1.4) (yellow eyes :1.2) (glaring:1.2) (disdain:1.3) (:< angry:1.1) (floating hair floating debris floating dust stones:1.3) ( shining sword blue flame effects flare ray shining sword winds :1.4) Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2331059072, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/5.png) ``` (best quality,masterpiece:1.2),(tarot card:1.2),1 person,profile picture,(card borders),(kandinsky),stained glass, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1874572449, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/8.png) ``` masterpiece, best quality, ultra-detailed, illustration, 1girl, solo, fantasy, flying, broom, night sky, outdoors, magic, spells, moon, stars, clouds, wind, hair, cape, hat, boots, broomstick, glowing, mysterious, enchanting, whimsical, playful, adventurous, freedom, wonder, imagination, determination, skill, speed, movement, energy, realism, naturalistic, figurative, representational, beauty, fantasy culture, mythology, fairy tales, folklore, legends, witches, wizards, magical creatures, fantasy worlds, composition, scale, foreground, middle ground, background, perspective, light, color, texture, detail, beauty, wonder. 
Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2745491572, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/9.png) ``` masterpiece, best quality, ultra-detailed, illustration, 1girl, solo, outdoors, camping, night, mountains, nature, stars, moon, tent, twin ponytails, green eyes, cheerful, happy, backpack, sleeping bag, camping stove, water bottle, mountain boots, gloves, sweater, hat, flashlight, forest, rocks, river, wood, smoke, shadows, contrast, clear sky, constellations, Milky Way, peaceful, serene, quiet, tranquil, remote, secluded, adventurous, exploration, escape, independence, survival, resourcefulness, challenge, perseverance, stamina, endurance, observation, intuition, adaptability, creativity, imagination, artistry, inspiration, beauty, awe, wonder, gratitude, appreciation, relaxation, enjoyment, rejuvenation, mindfulness, awareness, connection, harmony, balance, texture, detail, realism, depth, perspective, composition, color, light, shadow, reflection, refraction, tone, contrast, foreground, middle ground, background, naturalistic, figurative, representational, impressionistic, expressionistic, abstract, innovative, experimental, unique Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3799140628, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/10.png) ``` (masterpiece, best quality, ultra-detailed, best shadow), (detailed background,dark fantasy), (beautiful detailed face), high contrast, (best illumination, an extremely delicate and beautiful), ((cinematic light)), colorful, hyper detail, dramatic light, intricate details, (1 girl, solo,black hair, sharp face,low twintails,red eyes, hair between eyes,dynamic angle), blood splatter, swirling black light around the character, depth of field,black light particles,(broken glass),magic circle, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2698586958, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/11.png) ``` (best quality:1.4), highres, masterpiece, 1girl, (light purple hair:1.1), purple eyes, medium breasts, hair ornament, (black|white maid dress:1.2), detached sleeves, black pantyhose, blush, house, detailed face Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1511620501, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/12.png) ``` masterpiece, best quality, 1girl, solo, night, colorful, cityscape, cyberpunk, light smile, city pop, vector illustration Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3779026953, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 
4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/13.png) ``` masterpiece, best quality, 1girl, solo, black hair, medium hair, red eyes, blunt bangs, petite, expressionless, red skirt, white legwear, thighhighs, suspender skirt, white shirt, mary janes, night, dark, shadow, ligne claire, lineart Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 207774017, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/14.png) ``` ((masterpiece,best quality))1girl, solo, black skirt, blue eyes, electric guitar, guitar, headphones, holding, holding plectrum, instrument, long hair, , music, one side up, pink hair, playing guiter, pleated skirt, black shirt, indoors Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 4039374284, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/15.png) ``` ((masterpiece,best quality)) amazoness, dark skin, gold tattoos, black hair, 1girl, happy, the whole body, Thigh length,slender waist, long hair, outdoors,cloudy weather,Trees, flowers, summer, (off shoulder) Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3796269479, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/16.png) ``` negative space, shuimobysim , portrait of a woman standing , willow branches, (masterpiece, best quality:1.2), traditional chinese ink painting, modelshoot style, peaceful, (smile), looking at viewer, wearing long hanfu, hanfu, song, willow tree in background, wuchangshuo, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 162243009, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/17.png) ``` shukezouma, negative space, shuimobysim , portrait of a woman standing , willow branches, (masterpiece, best quality:1.2), traditional chinese ink painting, modelshoot style, peaceful, (smile), looking at viewer, wearing long hanfu, hanfu, song, willow tree in background, wuchangshuo, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1338460762, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/18.png) ``` [(white background:1.5),::5] hexagon, mid shot, (full body:1.4), fiery background, despairing expression, (ruined city:1.4), fire, ((Female wearing cybernetic exoskeleton)), sky, stand on street, day, (strong pose:1.4), Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 49371877, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, 
Hires steps: 10, Hires upscaler: 4x-UltraSharp ``` ![](https://huggingface.co/OedoSoldier/ambientmix/resolve/main/samples/1.png) ``` masterpiece, best quality, hot beauty girl,realistic shading,twintail hair girl ,standing in the snow, hyper-realistic cyberpunk style,hyper detailed, illustrious clothes,majestic fantasy cityscape, weathering with you, ((wearing a long coat)), majestic night view,wind,powder snow, Negative prompt: EasyNegative, badhandv4 Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 301493607, Size: 512x768, Model hash: fdffd3c312, Denoising strength: 0.5, Clip skip: 2, ENSD: 31337, Hires upscale: 2.5, Hires steps: 10, Hires upscaler: 4x-UltraSharp ```
Cometasonmi451/Mine
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - translation language: - unk - unk datasets: - zhaozh/autotrain-data-chatdoctor-reft-en-zh co2_eq_emissions: emissions: 2.240193635056679 --- # Model Trained Using AutoTrain - Problem type: Translation - Model ID: 45173113346 - CO2 Emissions (in grams): 2.2402 ## Validation Metrics - Loss: 1.636 - SacreBLEU: 29.513 - Gen len: 176.613
Connorvr/BrightBot-small
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg): `vocabtrimmer/mt5-small-squad-qg-trimmed-en-120000` This model is a trimmed version of [lmqg/mt5-small-squad-qg](https://huggingface.co/lmqg/mt5-small-squad-qg) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qg | vocabtrimmer/mt5-small-squad-qg-trimmed-en-120000 | |:---------------------------|:--------------------------|:----------------------------------------------------| | parameter_size_full | 300,165,504 | 166,944,128 | | parameter_size_embedding | 256,103,424 | 122,882,048 | | vocab_size | 250,101 | 120,002 | | compression_rate_full | 100.0 | 55.62 | | compression_rate_embedding | 100.0 | 47.98 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 120000 | 2 |
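Since the trimmed checkpoint keeps the original model's interface, a minimal usage sketch with `transformers` could look like the following; the `<hl>` highlight format mirrors the convention shown on other lmqg question-generation cards and is an assumption for this particular checkpoint:

```python
from transformers import pipeline

# Load the trimmed question-generation checkpoint as a text2text pipeline.
pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-squad-qg-trimmed-en-120000")

# lmqg question-generation models expect the answer span wrapped in <hl> tokens.
context = "<hl> William Turner <hl> was an English painter who specialised in watercolour landscapes."
print(pipe(context))
```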
ConstellationBoi/Oop
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T10:56:59Z
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 43.20 +/- 26.48 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Contrastive-Tension/BERT-Base-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Contrastive-Tension/BERT-Distil-CT
[ "pytorch", "tf", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PixelCopter-1.5L results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 41.90 +/- 16.89 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
Contrastive-Tension/BERT-Distil-NLI-CT
[ "pytorch", "tf", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa): `vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-30000` This model is a trimmed version of [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-itquad-qa | vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-30000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 385,548,288 | | parameter_size_embedding | 512,057,344 | 61,448,192 | | vocab_size | 250,028 | 30,004 | | compression_rate_full | 100.0 | 63.12 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 30000 | 2 |
Contrastive-Tension/BERT-Large-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 datasets: - tatsu-lab/alpaca --- ## 🍮 🦙 Flan-Alpaca: Instruction Tuning from Humans and Machines Thanks to [declare-lab](https://huggingface.co/declare-lab) for the training [repository](https://github.com/declare-lab/flan-alpaca), contains code for extending the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) synthetic instruction tuning to existing instruction-tuned models such as [Flan-T5](https://arxiv.org/abs/2210.11416). The pretrained models and demos are available on HuggingFace 🤗 : | Model | Parameters | Training GPUs | |---------------------------------------------------------------------------|------------|-----------------| | [Flan-Alpaca-Base](https://huggingface.co/declare-lab/flan-alpaca-base) | 220M | 1x A6000 | | [Flan-Alpaca-Large](https://huggingface.co/declare-lab/flan-alpaca-large) | 770M | 1x A6000 | | [Flan-Alpaca-XL](https://huggingface.co/declare-lab/flan-alpaca-xl) | 3B | 1x A6000 | | [Flan-Alpaca-XXL](https://huggingface.co/declare-lab/flan-alpaca-xxl) | 11B | 4x A6000 (FSDP) | | [Flan-Alpaca-UL2](https://huggingface.co/0-hero/flan-alpaca-ul2) | 20B | 4x A100 (80G) (FSDP) | ### Why? [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) represents an exciting new direction to approximate the performance of large language models (LLMs) like ChatGPT cheaply and easily. Concretely, they leverage an LLM such as GPT-3 to generate instructions as synthetic training data. The synthetic data which covers more than 50k tasks can then be used to finetune a smaller model. However, the original implementation is less accessible due to licensing constraints of the underlying [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) model. Furthermore, users have noted [potential noise](https://github.com/tloen/alpaca-lora/issues/65) in the synthetic dataset. Hence, it may be better to explore a fully accessible model that is already trained on high-quality (but less diverse) instructions such as [Flan-T5](https://arxiv.org/abs/2210.11416). ### Usage ``` from transformers import pipeline prompt = "Write an email about an alpaca that likes flan" model = pipeline(model="0-hero/flan-alpaca-ul2") model(prompt, max_length=128, do_sample=True) ``` Readme forked from declare-lab/flan-alpaca-xxl
Contrastive-Tension/BERT-Large-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('abush6352/sd-class-butterflies-32') image = pipeline().images[0] image ```
Contrastive-Tension/BERT-Large-NLI-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
Access to model DillonKoch/the-office-overfit is restricted and you are not in the authorized list. Visit https://huggingface.co/DillonKoch/the-office-overfit to ask for access.
Contrastive-Tension/RoBerta-Large-CT-STSb
[ "pytorch", "tf", "jax", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa): `vocabtrimmer/mt5-small-squad-qa-trimmed-en` This model is a trimmed version of [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qa | vocabtrimmer/mt5-small-squad-qa-trimmed-en | |:---------------------------|:--------------------------|:---------------------------------------------| | parameter_size_full | 300,165,504 | 258,414,976 | | parameter_size_embedding | 256,103,424 | 214,352,896 | | vocab_size | 250,101 | 209,329 | | compression_rate_full | 100.0 | 86.09 | | compression_rate_embedding | 100.0 | 83.7 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | | 2 |
Cool/Demo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-finetuned-mecanicos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-mecanicos This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.6138 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.8441 | 1.0 | 873 | 1.6876 | | 1.5373 | 2.0 | 1746 | 1.6241 | | 1.5216 | 3.0 | 2619 | 1.6138 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
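A minimal inference sketch for a fine-tuned distilgpt2 checkpoint like this one; the repo id is a placeholder (the card does not say where the weights are published) and the Spanish prompt is only illustrative:

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual location of the fine-tuned weights.
generator = pipeline("text-generation", model="your-username/distilgpt2-finetuned-mecanicos")

# Illustrative prompt; the training corpus is not described in the card.
print(generator("El motor no arranca porque", max_new_tokens=40, do_sample=True)[0]["generated_text"])
```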
Coolhand/Abuela
[ "en", "image_restoration", "superresolution", "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - samsum metrics: - rouge model-index: - name: flan-t5-small-samsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: samsum type: samsum config: samsum split: test args: samsum metrics: - name: Rouge1 type: rouge value: 43.7829 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-small-samsum This model is a fine-tuned version of [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.6325 - Rouge1: 43.7829 - Rouge2: 19.4994 - Rougel: 36.5484 - Rougelsum: 39.9323 - Gen Len: 16.8730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.8137 | 1.0 | 1842 | 1.6636 | 42.6155 | 18.9113 | 35.7997 | 39.046 | 16.7473 | | 1.7502 | 2.0 | 3684 | 1.6408 | 43.3833 | 19.1709 | 36.0178 | 39.5973 | 16.8620 | | 1.6864 | 3.0 | 5526 | 1.6372 | 43.31 | 19.2269 | 35.9239 | 39.6082 | 16.8559 | | 1.6647 | 4.0 | 7368 | 1.6334 | 43.7043 | 19.462 | 36.4417 | 39.8969 | 16.9512 | | 1.6391 | 5.0 | 9210 | 1.6325 | 43.7829 | 19.4994 | 36.5484 | 39.9323 | 16.8730 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0 - Datasets 2.11.0 - Tokenizers 0.13.2
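A minimal sketch of dialogue summarization with this checkpoint via the `transformers` pipeline; the repo id is a placeholder and the dialogue is just a SAMSum-style example:

```python
from transformers import pipeline

# Placeholder repo id -- replace with wherever this fine-tuned checkpoint is hosted.
summarizer = pipeline("summarization", model="your-username/flan-t5-small-samsum")

dialogue = (
    "Amanda: I baked cookies. Do you want some?\n"
    "Jerry: Sure!\n"
    "Amanda: I'll bring you some tomorrow :-)"
)
print(summarizer(dialogue, max_length=60, min_length=5)[0]["summary_text"])
```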
CouchCat/ma_ner_v6_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 language: - en pipeline_tag: text-classification ---
CouchCat/ma_ner_v7_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa): `vocabtrimmer/mt5-small-squad-qa-trimmed-en-5000` This model is a trimmed version of [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qa | vocabtrimmer/mt5-small-squad-qa-trimmed-en-5000 | |:---------------------------|:--------------------------|:--------------------------------------------------| | parameter_size_full | 300,165,504 | 49,184,128 | | parameter_size_embedding | 256,103,424 | 5,122,048 | | vocab_size | 250,101 | 5,002 | | compression_rate_full | 100.0 | 16.39 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 5000 | 2 |
CouchCat/ma_sa_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "sentiment-analysis", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
2023-03-30T11:41:11Z
--- license: apache-2.0 --- | Model Name | Parameters | Class | Ratio | Tokens | Batch Size (Tokens) | Training Loss ↓ | | --- | --- | --- | --- | --- | --- | --- | | [GerbilLab/GerbilBlender-A-32m](https://hf.co/GerbilLab/GerbilBlender-A-32m) | 32m | A-Class | 20 | 640M | 262K | 4.127 | "Blender" models, inspired by UL2 pretraining, are trained equally in fill-in-the-middle, causal modelling, and masked language modelling tasks. Special tokens for these models include: ``` '<fitm_start>', '<multiple_tok_mask>', '<fitm_result>', '<causal>', '<mlm_start>', '<single_tok_mask>', '<mlm_end>' # Example fill in the middle '<fitm_start> this is an <multiple_tok_mask> for fill-in-the-middle <fitm_result> example text <|endoftext|>' # Example causal language modelling '<causal> this is an example text for causal language modelling <|endoftext|>' # Example masked language modelling '<mlm_start> this is an <single_tok_mask> text for masked language modelling <mlm_end> example <|endoftext|>' ```
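A minimal sketch of prompting the fill-in-the-middle mode with `transformers`, assuming the checkpoint loads as an ordinary causal LM; the prompt layout follows the examples above:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "GerbilLab/GerbilBlender-A-32m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Fill-in-the-middle: mask the middle span, then let the model continue after <fitm_result>.
prompt = "<fitm_start> this is an <multiple_tok_mask> for fill-in-the-middle <fitm_result>"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```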
CoveJH/ConBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T11:43:19Z
# Vocabulary Trimmed [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa): `vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-60000` This model is a trimmed version of [lmqg/mbart-large-cc25-itquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-itquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-itquad-qa | vocabtrimmer/mbart-large-cc25-itquad-qa-trimmed-it-60000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 416,267,264 | | parameter_size_embedding | 512,057,344 | 122,886,144 | | vocab_size | 250,028 | 60,003 | | compression_rate_full | 100.0 | 68.15 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 60000 | 2 |
Coyotl/DialoGPT-test-last-arthurmorgan
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - pythae license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from multivae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name") ```
Coyotl/DialoGPT-test3-arthurmorgan
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: EvaOr/DeepRL_chp5_MLAgents_SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Craftified/Bob
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T12:00:12Z
# Vocabulary Trimmed [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa): `vocabtrimmer/mt5-small-squad-qa-trimmed-en-10000` This model is a trimmed version of [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qa | vocabtrimmer/mt5-small-squad-qa-trimmed-en-10000 | |:---------------------------|:--------------------------|:---------------------------------------------------| | parameter_size_full | 300,165,504 | 54,304,128 | | parameter_size_embedding | 256,103,424 | 10,242,048 | | vocab_size | 250,101 | 10,002 | | compression_rate_full | 100.0 | 18.09 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 10000 | 2 |
Craig/paraphrase-MiniLM-L6-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,026
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.2233 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.2674 | 1.0 | 5533 | 1.2233 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0 - Datasets 2.8.0 - Tokenizers 0.13.2
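For reference, a minimal extractive question-answering sketch with this kind of checkpoint; the repo id is a placeholder since the card does not state where the model is published:

```python
from transformers import pipeline

# Placeholder repo id -- point this at the published fine-tuned checkpoint.
qa = pipeline("question-answering", model="your-username/distilbert-base-uncased-finetuned-squad")

result = qa(
    question="What dataset was the model fine-tuned on?",
    context="This model is a fine-tuned version of distilbert-base-uncased on the SQuAD dataset.",
)
print(result["answer"], round(result["score"], 3))
```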
Crasher222/kaggle-comp-test
[ "pytorch", "bert", "text-classification", "en", "dataset:Crasher222/autonlp-data-kaggle-test", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 --- # Introduction Models in this repo are converted from https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless7-2022-11-11
CrayonShinchan/bart_fine_tune_test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.14 +/- 14.36 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
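A minimal sketch of what the TODO section could contain, assuming the usual huggingface_sb3 workflow; the repo id and the `.zip` filename are assumptions, not values taken from this card:

```python
import gymnasium as gym  # with stable-baselines3 >= 2.0; older versions use `import gym`
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id and filename -- check the repo's file list for the real ones.
checkpoint = load_from_hub(repo_id="your-username/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```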
CrayonShinchan/fine_tune_try_1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: unagui/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CrisLeaf/generador-de-historias-de-tolkien
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-30T12:13:05Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 269.05 +/- 21.24 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Crispy/dialopt-small-kratos
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T12:14:06Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 452.50 +/- 142.50 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Crives/distilbert-base-uncased-finetuned-emotion
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q_taxi results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.54 +/- 2.72 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="Horken/q_taxi", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
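`load_from_hub` above is not a library import but the small helper defined in the course notebook. One possible implementation sketch, assuming the pickle holds a dict with keys such as `env_id` (used above) and the Q-table:

```python
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download and unpickle a Q-learning model dict from the Hugging Face Hub."""
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)
```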
Crumped/imdb-simpleRNN
[ "keras" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa): `vocabtrimmer/mt5-small-squad-qa-trimmed-en-15000` This model is a trimmed version of [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qa | vocabtrimmer/mt5-small-squad-qa-trimmed-en-15000 | |:---------------------------|:--------------------------|:---------------------------------------------------| | parameter_size_full | 300,165,504 | 59,424,128 | | parameter_size_embedding | 256,103,424 | 15,362,048 | | vocab_size | 250,101 | 15,002 | | compression_rate_full | 100.0 | 19.8 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 15000 | 2 |
CrypticT1tan/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: whisper-large-ft-cy results: [] language: - cy pipeline_tag: automatic-speech-recognition --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-large-ft-cy This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.3488 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.4286 | 1.0 | 1622 | 0.4409 | | 0.2748 | 2.0 | 3244 | 0.3714 | | 0.1133 | 3.0 | 4866 | 0.3488 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.10.1 - Tokenizers 0.13.2
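A minimal transcription sketch for this fine-tuned Whisper checkpoint; the repo id is a placeholder and the audio path is illustrative:

```python
from transformers import pipeline

# Placeholder repo id -- replace with the published fine-tuned checkpoint.
asr = pipeline("automatic-speech-recognition", model="your-username/whisper-large-ft-cy")

# Transcribe a local Welsh audio file; chunking handles clips longer than 30 seconds.
print(asr("sample_cy.wav", chunk_length_s=30)["text"])
```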
Cryptikdw/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- duplicated_from: AnaNoSleep/models_by_dalcefo ---
Culmenus/opus-mt-de-is-finetuned-de-to-is_35g65cc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: gpl-3.0 --- copy of https://arca.live/b/aiart/72796744?mode=best&p=1
Culmenus/opus-mt-de-is-finetuned-de-to-is_ekkicc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: face_discriminator-2 results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: faces_resnet split: train args: faces_resnet metrics: - name: Accuracy type: accuracy value: 0.9416243654822335 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # face_discriminator-2 This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3598 - Accuracy: 0.9416 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5002 | 0.99 | 110 | 0.4721 | 0.8553 | | 0.3774 | 1.99 | 220 | 0.3598 | 0.9416 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.12.1 - Datasets 2.10.1 - Tokenizers 0.11.0
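A minimal inference sketch for this fine-tuned ResNet-50 classifier; the repo id and image path are placeholders:

```python
from PIL import Image
from transformers import pipeline

# Placeholder repo id -- substitute the published fine-tuned checkpoint.
classifier = pipeline("image-classification", model="your-username/face_discriminator-2")

image = Image.open("face.png")
for pred in classifier(image, top_k=2):
    print(pred["label"], round(pred["score"], 3))
```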
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2-finetuned-de-to-is_nr2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa): `vocabtrimmer/mt5-small-squad-qa-trimmed-en-30000` This model is a trimmed version of [lmqg/mt5-small-squad-qa](https://huggingface.co/lmqg/mt5-small-squad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mt5-small-squad-qa | vocabtrimmer/mt5-small-squad-qa-trimmed-en-30000 | |:---------------------------|:--------------------------|:---------------------------------------------------| | parameter_size_full | 300,165,504 | 74,784,128 | | parameter_size_embedding | 256,103,424 | 30,722,048 | | vocab_size | 250,101 | 30,002 | | compression_rate_full | 100.0 | 24.91 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 30000 | 2 |
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: bert-multilabel-sector-classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-multilabel-sector-classifier This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0563 - Precision Micro: 0.9091 - Precision Weighted: 0.9080 - Precision Samples: 0.9149 - Recall Micro: 0.8553 - Recall Weighted: 0.8553 - Recall Samples: 0.8996 - Accuracy: 0.8026 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision Micro | Precision Weighted | Precision Samples | Recall Micro | Recall Weighted | Recall Samples | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------------:|:------------------:|:-----------------:|:------------:|:---------------:|:--------------:|:--------:| | 0.0601 | 1.0 | 464 | 0.0563 | 0.9091 | 0.9080 | 0.9149 | 0.8553 | 0.8553 | 0.8996 | 0.8026 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
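Because this is a multi-label classifier, predictions are usually read off a sigmoid over the logits rather than a softmax. A minimal sketch, with the repo id and the 0.5 threshold as assumptions:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id -- replace with the published checkpoint.
model_id = "your-username/bert-multilabel-sector-classifier"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Funding for rural water and sanitation projects.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

probs = torch.sigmoid(logits)[0]
predicted = [model.config.id2label[i] for i, p in enumerate(probs.tolist()) if p > 0.5]
print(predicted)
```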
CurtisASmith/GPT-JRT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T12:39:20Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 10.93 +/- 4.69 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r yumingyi/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
CurtisBowser/DialoGPT-medium-sora-two
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: en datasets: - lmqg/qg_squad pipeline_tag: text2text-generation tags: - question generation widget: - text: "<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records." example_title: "Question Generation Example 1" - text: "Beyonce further expanded her acting career, starring as blues singer <hl> Etta James <hl> in the 2008 musical biopic, Cadillac Records." example_title: "Question Generation Example 2" - text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, <hl> Cadillac Records <hl> ." example_title: "Question Generation Example 3" model-index: - name: vocabtrimmer/mt5-small-trimmed-en-enquad-qg results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_squad type: default args: default metrics: - name: BLEU4 (Question Generation) type: bleu4_question_generation value: 21.84 - name: ROUGE-L (Question Generation) type: rouge_l_question_generation value: 49.16 - name: METEOR (Question Generation) type: meteor_question_generation value: 23.97 - name: BERTScore (Question Generation) type: bertscore_question_generation value: 90.06 - name: MoverScore (Question Generation) type: moverscore_question_generation value: 62.83 --- # Model Card of `vocabtrimmer/mt5-small-trimmed-en-enquad-qg` This model is a fine-tuned version of [ckpts/mt5-small-trimmed-en](https://huggingface.co/ckpts/mt5-small-trimmed-en) for the question generation task on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). ### Overview - **Language model:** [ckpts/mt5-small-trimmed-en](https://huggingface.co/ckpts/mt5-small-trimmed-en) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="vocabtrimmer/mt5-small-trimmed-en-enquad-qg") # model prediction questions = model.generate_q(list_context="William Turner was an English painter who specialised in watercolour landscapes", list_answer="William Turner") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-en-enquad-qg") output = pipe("<hl> Beyonce <hl> further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-enquad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:---------------------------------------------------------------| | BERTScore | 90.06 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 54.15 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 37.79 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 28.32 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 21.84 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 23.97 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 62.83 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 49.16 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: paragraph_answer - output_types: question - prefix_types: None - model: ckpts/mt5-small-trimmed-en - max_length: 512 - max_length_output: 32 - epoch: 14 - batch: 16 - lr: 0.0005 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 4 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-en-enquad-qg/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
CurtisBowser/DialoGPT-medium-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q_taxi_v2 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="Horken/q_taxi_v2", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
CyberMuffin/DialoGPT-small-ChandlerBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-finetuned-ks results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-finetuned-ks This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the superb dataset. It achieves the following results on the evaluation set: - Loss: 0.0895 - Accuracy: 0.9832 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6834 | 1.0 | 399 | 0.6010 | 0.8954 | | 0.307 | 2.0 | 798 | 0.1814 | 0.9750 | | 0.1782 | 3.0 | 1197 | 0.1134 | 0.9829 | | 0.1732 | 4.0 | 1597 | 0.0974 | 0.9819 | | 0.122 | 5.0 | 1995 | 0.0895 | 0.9832 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
D3vil/DialoGPT-smaall-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: tmp_trainer results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tmp_trainer This model was trained from scratch on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1 - Datasets 2.4.0 - Tokenizers 0.12.1
D3xter1922/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: pastells/ppo-SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-03-30T12:55:13Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 13.43 +/- 6.82 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r yumingyi/rl_course_vizdoom_health_gathering_supreme-2 ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme-2 ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme-2 --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
DHBaek/gpt2-stackoverflow-question-contents-generator
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
Trained using 10% of the python portion of the codesearchnet corpus. We used a novel cosine similarity loss function using sentence-transformers/all-MiniLM-L6-v2 to encode the sentences.
DTAI-KULeuven/robbertje-1-gb-merged
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:oscar (NL)", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 284.60 +/- 18.81 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Darren/darren
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
[[ 1524 414] [ 2035 33207]] 0.7863777089783281 0.9422563986152885 how?!?!?
Davlan/bert-base-multilingual-cased-finetuned-amharic
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
109
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="OlgaVityuk/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Davlan/bert-base-multilingual-cased-finetuned-yoruba
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 metrics: - type: mean_reward value: 0.74 +/- 0.44 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="Sera47/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Davlan/m2m100_418M-yor-eng-mt
[ "pytorch", "m2m_100", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "M2M100ForConditionalGeneration" ], "model_type": "m2m_100", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
```python from modeling_deltalm import DeltalmForConditionalGeneration # modeling_deltalm: https://huggingface.co/nguyenvulebinh/deltalm-base/blob/main/modeling_deltalm.py from configuration_deltalm import DeltalmConfig # configuration_deltalm: https://huggingface.co/nguyenvulebinh/deltalm-base/blob/main/configuration_deltalm.py from transformers import AutoTokenizer # load the tokenizer and model (checkpoint assumed from the links above) tokenizer = AutoTokenizer.from_pretrained("nguyenvulebinh/deltalm-base") model = DeltalmForConditionalGeneration.from_pretrained("nguyenvulebinh/deltalm-base") src_text = "i'm steve and<mask> 25 years old" encoded_hi = tokenizer(src_text, return_tensors="pt") generated_output = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.bos_token_id, max_length=20, num_beams=1, return_dict_in_generate=True, return_dict=True, output_hidden_states=True) text_output = tokenizer.batch_decode(generated_output.sequences, skip_special_tokens=True) print(text_output) ```
Davlan/mbart50-large-yor-eng-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PixelCopter results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 37.80 +/- 25.39 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Davlan/mt5_base_eng_yor_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep6_lr3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep6_lr3 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1098 - Precision: 0.7406 - Recall: 0.8132 - F1: 0.7752 - Accuracy: 0.9638 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.1408 | 0.6915 | 0.7505 | 0.7198 | 0.9556 | | 0.1799 | 2.0 | 934 | 0.1215 | 0.7135 | 0.7790 | 0.7448 | 0.9602 | | 0.1233 | 3.0 | 1401 | 0.1151 | 0.7248 | 0.8002 | 0.7606 | 0.9618 | | 0.1131 | 4.0 | 1868 | 0.1120 | 0.7362 | 0.8099 | 0.7713 | 0.9631 | | 0.1038 | 5.0 | 2335 | 0.1103 | 0.7399 | 0.8118 | 0.7742 | 0.9637 | | 0.1025 | 6.0 | 2802 | 0.1098 | 0.7406 | 0.8132 | 0.7752 | 0.9638 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Dayout/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: vjsyong/xlm-roberta-base_sentiment results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # vjsyong/xlm-roberta-base_sentiment This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1538 - Validation Loss: 0.1913 - Train Accuracy: 0.9312 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 9375, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.4806 | 0.2300 | 0.9078 | 0 | | 0.2233 | 0.1953 | 0.9252 | 1 | | 0.1538 | 0.1913 | 0.9312 | 2 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.10.0 - Datasets 2.10.1 - Tokenizers 0.13.2
Declan/CNN_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-03-30T16:20:37Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.1583 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2193 | 1.0 | 5533 | 1.1563 | | 0.9555 | 2.0 | 11066 | 1.1350 | | 0.7587 | 3.0 | 16599 | 1.1583 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Declan/CNN_model_v7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T16:20:43Z
--- datasets: - IlyaGusev/ru_turbo_alpaca - yahma/alpaca-cleaned language: - ru - en pipeline_tag: text2text-generation inference: false --- # LLaMA 7B trained on the ru_turbo_alpaca, Russian instructions dataset **Important**: You should probably use [Saiga](https://huggingface.co/IlyaGusev/saiga_7b_lora). It has regular updates and should be better in every task. Based on [LLaMA 7B](https://huggingface.co/decapoda-research/llama-7b-hf). * This is an adapter-only version. * Merged version: [link](https://huggingface.co/IlyaGusev/llama_7b_ru_turbo_alpaca_lora_merged). * llama.cpp version: [link](https://huggingface.co/IlyaGusev/llama_7b_ru_turbo_alpaca_lora_llamacpp). April 2, 2023: A new model version was uploaded, trained with outputs capped at 512 tokens instead of 256. Also, alpaca-cleaned was mixed with ru_turbo_alpaca. To use the old version, specify this: ``` revision="dd4b2c63d6639fd9490cd1d9b0d6d8b58e708d93" ``` Colab: [link](https://colab.research.google.com/drive/1JLoHOjDJQIa8SDqsEXrGHj4Z4aTnaajN) Training code: [link](https://github.com/IlyaGusev/rulm/tree/master/self_instruct) ```python from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig MODEL_NAME = "IlyaGusev/llama_7b_ru_turbo_alpaca_lora" config = PeftConfig.from_pretrained(MODEL_NAME) model = AutoModelForCausalLM.from_pretrained( config.base_model_name_or_path, load_in_8bit=True, device_map="auto" ) model = PeftModel.from_pretrained(model, MODEL_NAME) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) ``` Examples: ``` Вопрос: Почему трава зеленая? Выход: Трава зеленой из-за того, что она содержит хлорофиллы, пигменты, которые помогают ей фотосинтезировать энергию из солнечного света. Хлорофилл способен перерабатывать углекислый газ и воду в органические вещества, такие как углеводы, аминокислоты и жиры, которые необходимы растениям для их роста и развития. ``` ``` Задание: Сочини длинный рассказ, обязательно упоминая следующие объекты. Вход: Таня, мяч Выход: Таня любила футбол с детства. Она играла на улице со своими друзьями и мечтала стать профессиональным футболистом. Когда ей исполнилось 12 лет, она начала тренироваться в футбольной школе. Она была очень талантливой и упорной, и вскоре ей удалось попасть в молодежную команду. Таня играла в футбол каждый день, тренируясь и участвуя в матчах. Она старалась улучшать свои навыки и стать лучшей футболисткой в своей команде. Ее талант не остался незамеченным, и ей предложили попробовать свои силы в более серьезной команде. Она приняла предложение и начала играть за взрослую команду. ``` * Final loss: 1.132 * Time: 35 hours, 34 minutes * Hardware: 1x RTX 3090
Declan/CNN_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### yngblass Dreambooth model trained by yngbless with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Declan/ChicagoTribune_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: artbreguez/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/HuffPost_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
Based on facebook\opt125M. Trained on 1 GB of [BRWAC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC). Details about the training [here](https://github.com/marcusborela/deep_learning_em_buscas_unicamp/blob/main/code/aula4_treino_modelo_de_linguagem.ipynb)
Declan/NPR_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bart_finetuned_5e_5_7epoch results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart_finetuned_5e_5_7epoch This model is a fine-tuned version of [ArisuNguyen/bart_finetuned_5e_5](https://huggingface.co/ArisuNguyen/bart_finetuned_5e_5) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en tags: - openvino --- # bigscience/bloom-560m This is the [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) model converted to [OpenVINO](https://openvino.ai), for accelerated inference. An example of how to do inference on this model: ```python from optimum.intel.openvino import OVModelForCausalLM from transformers import AutoTokenizer, pipeline # model_id should be set to either a local directory or a model available on the HuggingFace hub. model_id = "helenai/bigscience-bloom-560m-ov" tokenizer = AutoTokenizer.from_pretrained(model_id) model = OVModelForCausalLM.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) result = pipe("hello world") print(result) ```
Declan/NewYorkPost_model_v1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: bsd-3-clause tags: - summarization - generated_from_trainer model-index: - name: long-t5-tglobal-base-16384-book-summary-cambiar results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
Declan/WallStreetJournal_model_v6
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Declan/WallStreetJournal_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
# Vocabulary Trimmed [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base): `vocabtrimmer/xlm-v-base-trimmed-fr` This model is a trimmed version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | facebook/xlm-v-base | vocabtrimmer/xlm-v-base-trimmed-fr | |:---------------------------|:----------------------|:-------------------------------------| | parameter_size_full | 779,396,349 | 254,030,160 | | parameter_size_embedding | 692,451,072 | 167,768,064 | | vocab_size | 901,629 | 218,448 | | compression_rate_full | 100.0 | 32.59 | | compression_rate_embedding | 100.0 | 24.23 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | | 2 |
Declan/test_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T17:52:38Z
These are not my models. They are diffusion models picked up from https://civitai.com/ and https://huggingface.co/
Declan/test_push
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.933 - name: F1 type: f1 value: 0.9334700183474604 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1626 - Accuracy: 0.933 - F1: 0.9335 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.2254 | 1.0 | 250 | 0.1806 | 0.922 | 0.9219 | | 0.1394 | 2.0 | 500 | 0.1626 | 0.933 | 0.9335 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
DeepBasak/Slack
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# `vocabtrimmer/xlm-v-base-trimmed-pt-tweet-sentiment-pt` This model is a fine-tuned version of [/home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-pt](https://huggingface.co//home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-pt) on the [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (portuguese). Following metrics are computed on the `test` split of [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual)(portuguese). | | eval_f1_micro | eval_recall_micro | eval_precision_micro | eval_f1_macro | eval_recall_macro | eval_precision_macro | eval_accuracy | |---:|----------------:|--------------------:|-----------------------:|----------------:|--------------------:|-----------------------:|----------------:| | 0 | 58.62 | 58.62 | 58.62 | 52.84 | 58.62 | 57.64 | 58.62 | Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-pt-tweet-sentiment-pt/raw/main/eval.json).
DeepChem/ChemBERTa-10M-MLM
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
90
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | inner_optimizer.class_name | Custom>RMSprop | | inner_optimizer.config.name | RMSprop | | inner_optimizer.config.weight_decay | None | | inner_optimizer.config.clipnorm | None | | inner_optimizer.config.global_clipnorm | None | | inner_optimizer.config.clipvalue | None | | inner_optimizer.config.use_ema | False | | inner_optimizer.config.ema_momentum | 0.99 | | inner_optimizer.config.ema_overwrite_frequency | 100 | | inner_optimizer.config.jit_compile | True | | inner_optimizer.config.is_legacy_optimizer | False | | inner_optimizer.config.learning_rate | 0.0010000000474974513 | | inner_optimizer.config.rho | 0.9 | | inner_optimizer.config.momentum | 0.0 | | inner_optimizer.config.epsilon | 1e-07 | | inner_optimizer.config.centered | False | | dynamic | True | | initial_scale | 32768.0 | | dynamic_growth_steps | 2000 | | training_precision | mixed_float16 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
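The table above describes an RMSprop optimizer wrapped in a dynamic loss-scale optimizer under a `mixed_float16` policy. A hedged sketch of how those values map to Keras code (this is an illustration, not the original training script):

```python
import tensorflow as tf

# training_precision from the table
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# inner_optimizer.config values from the table
rmsprop = tf.keras.optimizers.RMSprop(
    learning_rate=0.0010000000474974513,
    rho=0.9,
    momentum=0.0,
    epsilon=1e-07,
    centered=False,
)

# dynamic loss scaling with initial_scale and dynamic_growth_steps from the table
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
    rmsprop,
    dynamic=True,
    initial_scale=32768.0,
    dynamic_growth_steps=2000,
)
```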
DeepChem/ChemBERTa-10M-MTR
[ "pytorch", "roberta", "arxiv:1910.09700", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
708
2023-03-30T18:03:18Z
# Vocabulary Trimmed [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base): `vocabtrimmer/xlm-v-base-trimmed-pt-5000` This model is a trimmed version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | facebook/xlm-v-base | vocabtrimmer/xlm-v-base-trimmed-pt-5000 | |:---------------------------|:----------------------|:------------------------------------------| | parameter_size_full | 779,396,349 | 89,890,186 | | parameter_size_embedding | 692,451,072 | 3,841,536 | | vocab_size | 901,629 | 5,002 | | compression_rate_full | 100.0 | 11.53 | | compression_rate_embedding | 100.0 | 0.55 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 5000 | 2 |
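A hedged sketch of loading the trimmed checkpoint and confirming the reduced sizes reported above (exact parameter counts may differ slightly by transformers version):

```python
from transformers import AutoTokenizer, AutoModelForMaskedLM

model_id = "vocabtrimmer/xlm-v-base-trimmed-pt-5000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

print(len(tokenizer))                              # ~5,002 vocabulary entries after trimming
print(sum(p.numel() for p in model.parameters()))  # ~89.9M parameters, vs. ~779M before trimming
```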
DeepChem/ChemBERTa-5M-MTR
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **play directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: helpingstar/poca-SoccerTwos 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
DeepChem/ChemBERTa-77M-MLM
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,416
null
--- language: - is license: cc-by-4.0 datasets: - igc --- # Icelandic GPT-2 model This Icelandic GPT-2 language model was pretrained on the [Icelandic Gigaword Corpus](http://igc.arnastofnun.is/) (IGC, 2020 version), which contains approximately 1.532 million running words. The model was trained for 20 epochs on a TPU v3-8, with a total training time of 3 days and 21 hours. The hyperparameters used for training can be found in the [JAX/Flax documentation](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#train-model-1) for the Transformers library. The model uses a byte-level BPE tokenizer with a vocabulary size of 51,000. **Note**: This model was pretrained on a tokenized and sentence-segmented version of the IGC, which is reflected in the generated text. A new version of this model, trained on a pre-tokenized version of IGC (2022 version), is forthcoming. # Acknowledgments This research was supported with Cloud TPUs from Google's TPU Research Cloud (TRC).
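A hedged generation sketch with the `transformers` pipeline (the repo id is a placeholder for this model's Hub name, and the sampling settings are illustrative):

```python
from transformers import pipeline

# Placeholder repo id for this Icelandic GPT-2 checkpoint.
generator = pipeline("text-generation", model="<this-repo-id>")

print(generator("Ísland er", max_new_tokens=30, do_sample=True)[0]["generated_text"])
```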
DeepESP/gpt2-spanish-medium
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "es", "dataset:ebooks", "transformers", "GPT-2", "Spanish", "ebooks", "nlg", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
340
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: tmVar_5e-05_30_03 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tmVar_5e-05_30_03 This model is a fine-tuned version of [microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0230 - Precision: 0.8677 - Recall: 0.8865 - F1: 0.8770 - Accuracy: 0.9964 ## Model description Trained on Token set with max_length=475 ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 500 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.3602 | 1.39 | 25 | 0.0547 | 0.4823 | 0.3676 | 0.4172 | 0.9851 | | 0.0498 | 2.78 | 50 | 0.0305 | 0.4518 | 0.5568 | 0.4988 | 0.9912 | | 0.0237 | 4.17 | 75 | 0.0198 | 0.6338 | 0.7297 | 0.6784 | 0.9942 | | 0.0089 | 5.56 | 100 | 0.0164 | 0.7895 | 0.8919 | 0.8376 | 0.9960 | | 0.0036 | 6.94 | 125 | 0.0138 | 0.7826 | 0.8757 | 0.8265 | 0.9967 | | 0.0023 | 8.33 | 150 | 0.0148 | 0.8462 | 0.8919 | 0.8684 | 0.9969 | | 0.0012 | 9.72 | 175 | 0.0159 | 0.7890 | 0.9297 | 0.8536 | 0.9966 | | 0.0012 | 11.11 | 200 | 0.0163 | 0.845 | 0.9135 | 0.8779 | 0.9970 | | 0.001 | 12.5 | 225 | 0.0165 | 0.8534 | 0.8811 | 0.8670 | 0.9967 | | 0.0012 | 13.89 | 250 | 0.0215 | 0.8020 | 0.8757 | 0.8372 | 0.9961 | | 0.0008 | 15.28 | 275 | 0.0192 | 0.875 | 0.9081 | 0.8912 | 0.9970 | | 0.0007 | 16.67 | 300 | 0.0192 | 0.875 | 0.9081 | 0.8912 | 0.9970 | | 0.0005 | 18.06 | 325 | 0.0192 | 0.875 | 0.9081 | 0.8912 | 0.9970 | | 0.0009 | 19.44 | 350 | 0.0230 | 0.8677 | 0.8865 | 0.8770 | 0.9964 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
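A hedged inference sketch for this token-classification checkpoint (the repo id is a placeholder, and the entity labels depend on the tagging scheme used during fine-tuning):

```python
from transformers import pipeline

# Placeholder repo id; replace with the actual Hub path of this fine-tuned checkpoint.
ner = pipeline("token-classification", model="<this-repo-id>", aggregation_strategy="simple")

text = "The patient carried the BRCA1 c.68_69delAG mutation."
for entity in ner(text):
    print(entity["word"], entity["entity_group"], round(entity["score"], 3))
```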
DeepESP/gpt2-spanish
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "es", "dataset:ebooks", "transformers", "GPT-2", "Spanish", "ebooks", "nlg", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,463
2023-03-30T18:15:59Z
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: story_summarizer-finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # story_summarizer-finetuned This model is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.9028 - Rouge1: 30.4344 - Rouge2: 6.2601 - Rougel: 18.9971 - Rougelsum: 26.4496 - Gen Len: 95.0942 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:--------:| | No log | 1.0 | 150 | 2.8526 | 29.1919 | 5.8045 | 18.2639 | 25.4635 | 102.0117 | | No log | 2.0 | 300 | 2.8654 | 30.0355 | 6.0614 | 18.7598 | 26.1234 | 96.4292 | | No log | 3.0 | 450 | 2.9028 | 30.4344 | 6.2601 | 18.9971 | 26.4496 | 95.0942 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.2
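A hedged usage sketch with the summarization pipeline (the repo id is a placeholder; the generation lengths are illustrative and roughly match the ~95-token average reported above):

```python
from transformers import pipeline

# Placeholder repo id for this fine-tuned BART checkpoint.
summarizer = pipeline("summarization", model="<this-repo-id>")

story = "Once upon a time ..."  # any long story passage
print(summarizer(story, max_length=128, min_length=40)[0]["summary_text"])
```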
DeepPavlov/xlm-roberta-large-en-ru-mnli
[ "pytorch", "xlm-roberta", "text-classification", "en", "ru", "dataset:glue", "dataset:mnli", "transformers", "xlm-roberta-large", "xlm-roberta-large-en-ru", "xlm-roberta-large-en-ru-mnli", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
null
# Vocabulary Trimmed [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base): `vocabtrimmer/xlm-v-base-trimmed-fr-5000` This model is a trimmed version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | facebook/xlm-v-base | vocabtrimmer/xlm-v-base-trimmed-fr-5000 | |:---------------------------|:----------------------|:------------------------------------------| | parameter_size_full | 779,396,349 | 89,890,186 | | parameter_size_embedding | 692,451,072 | 3,841,536 | | vocab_size | 901,629 | 5,002 | | compression_rate_full | 100.0 | 11.53 | | compression_rate_embedding | 100.0 | 0.55 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 5000 | 2 |
DeepPavlov/xlm-roberta-large-en-ru
[ "pytorch", "xlm-roberta", "feature-extraction", "en", "ru", "transformers" ]
feature-extraction
{ "architectures": [ "XLMRobertaModel" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
190
null
--- license: cc-by-nc-4.0 datasets: - tatsu-lab/alpaca library_name: transformers pipeline_tag: text-generation tags: - galactica - alpaca - opt inference: false --- # GALPACA 30B (large) GALACTICA 30B fine-tuned on the Alpaca dataset. The model card from the original Galactica repo can be found [here](https://github.com/paperswithcode/galai/blob/main/docs/model_card.md), and the original paper [here](https://galactica.org/paper.pdf). The dataset card for Alpaca can be found [here](https://huggingface.co/datasets/tatsu-lab/alpaca/blob/main/README.md), and the project homepage [here](https://crfm.stanford.edu/2023/03/13/alpaca.html). The Alpaca dataset was collected with a modified version of the [Self-Instruct Framework](https://github.com/yizhongw/self-instruct), and was built using OpenAI's `text-davinci-003` model. As such it is subject to OpenAI's terms of service. ## Model Details The GALACTICA models are trained on a large-scale scientific corpus and are designed to perform scientific tasks. The Alpaca dataset is a set of 52k instruct-response pairs designed to enhance the instruction following capabilities of pre-trained language models. ## Model Use The GALACTICA model card specifies that the primary intended users of the GALACTICA models are researchers studying language models applied to the scientific domain, and it cautions against production use of GALACTICA without safeguards due to the potential for the model to produce inaccurate information. The original GALACTICA models are available under a non-commercial CC BY-NC 4.0 license, and the GALPACA model is additionally subject to the [OpenAI Terms of Service](https://openai.com/policies/terms-of-use). <details> <summary> Example Outputs </summary> *__Full Prompt:__* ``` Below is an instruction that describes a task. As a scientist, write a response that appropriately completes the request, citing specific papers whenever possible. ### Instruction: Write out Maxwell's equations and explain each one. ### Markdown and LaTeX Response: ``` *__Response:__* Maxwell's equations are a set of four equations that describe the behavior of electromagnetic fields. These equations are: $$\nabla \times \textbf{E} = -\frac{\partial \textbf{B}}{\partial t}$$ $$\nabla \times \textbf{B} = \mu_0 \epsilon_0 \frac{\partial \textbf{E}}{\partial t} + \mu_0 \textbf{J}$$ $$\nabla \bullet \textbf{E} = \frac{\rho}{\epsilon_0}$$ $$\nabla \bullet \textbf{B} = 0$$ The first two equations describe how the electric and magnetic fields, E and B, are related to each other. The third and fourth equations are Gauss's law for electric and magnetic fields, respectively. These equations describe the behavior of the electric and magnetic fields when there are charges and currents in the space. Each equation has a specific purpose and together they form a complete description of the behavior of electromagnetic fields. *__Full Prompt:__* ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Write a python function implements the Sherman-Morrison matrix inversion lemma using numpy. ### Response: ```python ``` *__Response:__* ```python def sm_inv_lemma(A, u, v): """ Function implementing the Sherman-Morrison matrix inversion lemma using numpy. Parameters ---------- A : numpy array The original matrix. u : numpy array The first vector in the Sherman-Morrison lemma. v : numpy array The second vector in the Sherman-Morrison lemma. Returns ------- The inverse matrix of A - uv^T. 
""" A_inv = np.linalg.inv(A) alpha = 1 / (1 + v.T @ A_inv @ u) return A_inv - (alpha * A_inv @ u @ v.T @ A_inv) ``` </details> ## Training Data The GALPACA models are trained by fine-tuning pre-trained GALACTICA models on the Alpaca dataset. GALACTICA models were trained on 106 billion tokens of open-access scientific text and data, including papers, textbooks, scientific websites, encyclopedias, and more. Fine-tuning the base GALACTICA models on the 52k instruction-response pairs in the Alpaca dataset allows users to query the GALPACA models in an instruct-response fashion. ## How to Use The GALPACA weights are made available for use with the `transformers` library. <details> <summary> Click to expand </summary> ```python # pip install accelerate import torch from transformers import AutoTokenizer, OPTForCausalLM tokenizer = AutoTokenizer.from_pretrained("GeorgiaTechResearchInstitute/galpaca-30b") model = OPTForCausalLM.from_pretrained("GeorgiaTechResearchInstitute/galpaca-30b", device_map="auto", torch_dtype=torch.float16) # see the original Alpaca repo for more information about the prompt templates no_input_prompt_template = ("Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response:") prompt = "Write out Maxwell's equations and explain the meaning of each one." formatted_prompt = no_input_prompt_template.format_map({'instruction': prompt}) tokenized_prompt = tokenizer(formatted_prompt, return_tensors="pt").input_ids.to(model.device) out_tokens = model.generate(tokenized_prompt) print(tokenizer.batch_decode(out_tokens, skip_special_tokens=False, clean_up_tokenization_spaces=False)) ``` </details> ## Training Resources GALPACA 30B was fine-tuned in about 6 hours using 16 A100 80GB GPUs, 16-bit mixed-precision, an effective batch-size of 1024, and with a maximum context window of 384 tokens. This model was trained using DeepSpeed ZeRO Stage 3 optimizations. ## Performance and Limitations Qualitative evaluation suggests that Galpaca frequently outperforms LLaMA-based Alpaca models on tasks related to technical knowledge and programming, while it underperforms on natural language tasks such as generating prose. More information about the performance and limitations of the GALACTICA family of models can be found on the original GALACTICA model card. ## Works Cited ```bibtex @inproceedings{GALACTICA, title={GALACTICA: A Large Language Model for Science}, author={Ross Taylor and Marcin Kardas and Guillem Cucurull and Thomas Scialom and Anthony Hartshorn and Elvis Saravia and Andrew Poulton and Viktor Kerkez and Robert Stojnic}, year={2022} } ``` ```bibtex @misc{alpaca, author = {Rohan Taori and Ishaan Gulrajani and Tianyi Zhang and Yann Dubois and Xuechen Li and Carlos Guestrin and Percy Liang and Tatsunori B. Hashimoto }, title = {Stanford Alpaca: An Instruction-following LLaMA model}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/tatsu-lab/stanford_alpaca}}, } ```
DeltaHub/adapter_t5-3b_cola
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - classification - generated_from_trainer datasets: - poem_sentiment metrics: - accuracy model-index: - name: clasificador-poem-sentiment results: - task: name: Text Classification type: text-classification dataset: name: poem_sentiment type: poem_sentiment config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.8653846153846154 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # clasificador-poem-sentiment This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the poem_sentiment dataset. It achieves the following results on the evaluation set: - Loss: 0.5413 - Accuracy: 0.8654 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 112 | 0.4332 | 0.8654 | | No log | 2.0 | 224 | 0.4227 | 0.8942 | | No log | 3.0 | 336 | 0.5413 | 0.8654 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
DeltaHub/lora_t5-base_mrpc
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-03-30T18:38:31Z
--- tags: - sentence-transformers - feature-extraction - sentence-similarity language: en license: apache-2.0 datasets: - s2orc - flax-sentence-embeddings/stackexchange_xml - ms_marco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - natural_questions - trivia_qa - embedding-data/sentence-compression - embedding-data/flickr30k-captions - embedding-data/altlex - embedding-data/simple-wiki - embedding-data/QQP - embedding-data/SPECTER - embedding-data/PAQ_pairs - embedding-data/WikiAnswers --- # ONNX version of sentence-transformers/all-MiniLM-L12-v2 This is a sentence-transformers model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search. The ONNX version of this model is made for the [Metarank](https://github.com/metarank/metarank) re-ranker to do semantic similarity. Check out the [main Metarank docs](https://docs.metarank.ai) on how to configure it. TLDR: ```yaml - type: field_match name: title_query_match rankingField: ranking.query itemField: item.title distance: cos method: type: bert model: metarank/all-MiniLM-L12-v2 ``` ## Building the model ```shell $> pip install -r requirements.txt $> python convert.py ============= Diagnostic Run torch.onnx.export version 2.0.0+cu117 ============= verbose: False, log level: Level.ERROR ======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ======================== ``` ## License Apache 2.0
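Outside of Metarank, the exported graph can also be queried directly with `onnxruntime`. This is a hedged sketch: it assumes the export is stored as `model.onnx` and exposes the usual `input_ids`/`attention_mask` inputs, which may differ depending on how `convert.py` exports the graph.

```python
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L12-v2")
session = ort.InferenceSession("model.onnx")  # assumed file name

batch = tokenizer(["first sentence", "second sentence"], padding=True, return_tensors="np")
onnx_inputs = {k: v for k, v in batch.items() if k in {i.name for i in session.get_inputs()}}
token_embeddings = session.run(None, onnx_inputs)[0]  # (batch, seq_len, 384)

# Mean pooling over non-padding tokens, as in sentence-transformers.
mask = batch["attention_mask"][..., None].astype(np.float32)
embeddings = (token_embeddings * mask).sum(axis=1) / mask.sum(axis=1)
print(embeddings.shape)  # (2, 384)
```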
Denilson/gbert-base-germaner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Deliberate [Civitai](https://civitai.com/models/4823/deliberate) ![screenshot](https://imagecache.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/40943b9c-ede2-48c0-77d7-b48fee661d00/width=450/150224) ## All in One / Any Case Version This model gives you the ability to create anything you want. The more prompt-writing knowledge you have, the better your results will be. In practice, this means you will never get a perfect result from just a few words: you have to write your prompt in extensive detail. ## Who will find this model perfect: * NSFW masters * Meticulous anatomy artists * Creative prompters * Art designers Dive into the world of perfect creations with my prompts. Your experiments are appreciated, so feel free to show everyone what you can get with this model.
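A hedged text-to-image sketch with `diffusers`; the repo id is a placeholder that assumes a diffusers-format copy of the weights is available, and the prompt is only an example of the detailed prompting recommended above:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id; point this at a diffusers-format copy of the Deliberate weights.
pipe = StableDiffusionPipeline.from_pretrained("<deliberate-repo-id>", torch_dtype=torch.float16).to("cuda")

prompt = (
    "portrait of an elderly sailor, weathered skin, detailed wrinkles, "
    "dramatic rim lighting, 85mm photo, highly detailed"
)
image = pipe(prompt, negative_prompt="blurry, deformed hands", num_inference_steps=30).images[0]
image.save("deliberate_sample.png")
```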
Denny29/DialoGPT-medium-asunayuuki
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice_9_0 model-index: - name: wav2vec2-large-xls-r-300m-cnh-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-cnh-colab This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice_9_0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
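A hedged transcription sketch with the automatic-speech-recognition pipeline (the repo id is a placeholder; audio should ideally be 16 kHz mono, matching the XLS-R setup):

```python
from transformers import pipeline

# Placeholder repo id for this fine-tuned wav2vec2 checkpoint.
asr = pipeline("automatic-speech-recognition", model="<this-repo-id>")

print(asr("sample.wav")["text"])  # any local Hakha Chin recording
```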
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - classification - generated_from_trainer datasets: - rotten_tomatoes metrics: - accuracy model-index: - name: clasificador-rotten-tomatoes results: - task: name: Text Classification type: text-classification dataset: name: rotten_tomatoes type: rotten_tomatoes config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.8527204502814258 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # clasificador-rotten-tomatoes This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the rotten_tomatoes dataset. It achieves the following results on the evaluation set: - Loss: 0.8343 - Accuracy: 0.8527 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3971 | 1.0 | 1067 | 0.4166 | 0.8377 | | 0.2056 | 2.0 | 2134 | 0.7931 | 0.8218 | | 0.0672 | 3.0 | 3201 | 0.8343 | 0.8527 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.1+cu116 - Datasets 2.11.0 - Tokenizers 0.13.2
DevsIA/imagenes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T19:12:26Z
--- license: other --- LLaMA-7B converted to work with Transformers/HuggingFace. This is under a special license, please see the LICENSE file for details. # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December 2022 and February 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA, Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citations details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project, by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measures to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. 
**Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. ## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following source of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange[2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th >LLaMA</th> <th colspan=6>Model hyper parameters </th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th> <th>4096</th> <th>32</th> <th>32</th> <th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> </tbody> </table> *Table 1 - Summary of LLama Model Hyperparameters* We present our results on eight standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks </th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93 </th> <tr><th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94 </th> <tr><th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92 </th> <tr><th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th></tr> </tbody> </table> *Table 2 - Summary of LLama Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that lower value is better indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary bias of our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. 
**Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
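Since these weights were converted for Transformers (see the note at the top of this card), a hedged loading sketch follows. The repo id is a placeholder, LLaMA support requires a sufficiently recent transformers version, and `device_map="auto"` needs the accelerate package.

```python
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Placeholder repo id for this converted LLaMA-7B checkpoint.
model_id = "<this-repo-id>"
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

inputs = tokenizer("The theory of relativity states that", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```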
Dhito/am
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T19:24:43Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 254.91 +/- 24.88 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
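A hedged sketch of what the TODO above typically expands to; both the repo id and the checkpoint filename are assumptions, so check this repo's file list before running:

```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Repo id and filename below are assumptions; adjust to the actual repository contents.
checkpoint = load_from_hub(repo_id="<this-repo-id>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```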
Dhruva/Interstellar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# `vocabtrimmer/xlm-v-base-trimmed-fr-10000-tweet-sentiment-fr` This model is a fine-tuned version of [/home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-fr-10000](https://huggingface.co//home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-fr-10000) on the [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (french). The following metrics are computed on the `test` split of [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (french). | | eval_f1_micro | eval_recall_micro | eval_precision_micro | eval_f1_macro | eval_recall_macro | eval_precision_macro | eval_accuracy | |---:|----------------:|--------------------:|-----------------------:|----------------:|--------------------:|-----------------------:|----------------:| | 0 | 58.28 | 58.28 | 58.28 | 57.86 | 58.28 | 57.69 | 58.28 | Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-fr-10000-tweet-sentiment-fr/raw/main/eval.json).
Dibyaranjan/nl_image_search
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-30T19:29:50Z
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia duplicated_from: bert-base-uncased --- # BERT base model (uncased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference between english and English. Disclaimer: The team releasing BERT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labeling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally masks the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Next sentence prediction (NSP): the models concatenates two masked sentences as inputs during pretraining. Sometimes they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to predict if the two sentences were following each other or not. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences, for instance, you can train a standard classifier using the features produced by the BERT model as inputs. ## Model variations BERT has originally been released in base and large variations, for cased and uncased input text. The uncased models also strips out an accent markers. Chinese and multilingual uncased and cased versions followed shortly after. Modified preprocessing with whole word masking has replaced subpiece masking in a following work, with the release of two models. Other 24 smaller models are released afterward. The detailed release history can be found on the [google-research/bert readme](https://github.com/google-research/bert/blob/master/README.md) on github. 
| Model | #params | Language | |------------------------|--------------------------------|-------| | [`bert-base-uncased`](https://huggingface.co/bert-base-uncased) | 110M | English | | [`bert-large-uncased`](https://huggingface.co/bert-large-uncased) | 340M | English | sub | [`bert-base-cased`](https://huggingface.co/bert-base-cased) | 110M | English | | [`bert-large-cased`](https://huggingface.co/bert-large-cased) | 340M | English | | [`bert-base-chinese`](https://huggingface.co/bert-base-chinese) | 110M | Chinese | | [`bert-base-multilingual-cased`](https://huggingface.co/bert-base-multilingual-cased) | 110M | Multiple | | [`bert-large-uncased-whole-word-masking`](https://huggingface.co/bert-large-uncased-whole-word-masking) | 340M | English | | [`bert-large-cased-whole-word-masking`](https://huggingface.co/bert-large-cased-whole-word-masking) | 340M | English | ## Intended uses & limitations You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=bert) to look for fine-tuned versions of a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at model like GPT2. ### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-base-uncased') >>> unmasker("Hello I'm a [MASK] model.") [{'sequence': "[CLS] hello i'm a fashion model. [SEP]", 'score': 0.1073106899857521, 'token': 4827, 'token_str': 'fashion'}, {'sequence': "[CLS] hello i'm a role model. [SEP]", 'score': 0.08774490654468536, 'token': 2535, 'token_str': 'role'}, {'sequence': "[CLS] hello i'm a new model. [SEP]", 'score': 0.05338378623127937, 'token': 2047, 'token_str': 'new'}, {'sequence': "[CLS] hello i'm a super model. [SEP]", 'score': 0.04667217284440994, 'token': 3565, 'token_str': 'super'}, {'sequence': "[CLS] hello i'm a fine model. [SEP]", 'score': 0.027095865458250046, 'token': 2986, 'token_str': 'fine'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained("bert-base-uncased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = TFBertModel.from_pretrained("bert-base-uncased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-base-uncased') >>> unmasker("The man worked as a [MASK].") [{'sequence': '[CLS] the man worked as a carpenter. [SEP]', 'score': 0.09747550636529922, 'token': 10533, 'token_str': 'carpenter'}, {'sequence': '[CLS] the man worked as a waiter. 
[SEP]', 'score': 0.0523831807076931, 'token': 15610, 'token_str': 'waiter'}, {'sequence': '[CLS] the man worked as a barber. [SEP]', 'score': 0.04962705448269844, 'token': 13362, 'token_str': 'barber'}, {'sequence': '[CLS] the man worked as a mechanic. [SEP]', 'score': 0.03788609802722931, 'token': 15893, 'token_str': 'mechanic'}, {'sequence': '[CLS] the man worked as a salesman. [SEP]', 'score': 0.037680890411138535, 'token': 18968, 'token_str': 'salesman'}] >>> unmasker("The woman worked as a [MASK].") [{'sequence': '[CLS] the woman worked as a nurse. [SEP]', 'score': 0.21981462836265564, 'token': 6821, 'token_str': 'nurse'}, {'sequence': '[CLS] the woman worked as a waitress. [SEP]', 'score': 0.1597415804862976, 'token': 13877, 'token_str': 'waitress'}, {'sequence': '[CLS] the woman worked as a maid. [SEP]', 'score': 0.1154729500412941, 'token': 10850, 'token_str': 'maid'}, {'sequence': '[CLS] the woman worked as a prostitute. [SEP]', 'score': 0.037968918681144714, 'token': 19215, 'token_str': 'prostitute'}, {'sequence': '[CLS] the woman worked as a cook. [SEP]', 'score': 0.03042375110089779, 'token': 5660, 'token_str': 'cook'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The BERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence A [SEP] Sentence B [SEP] ``` With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus, and in the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constrain is that the result with the two "sentences" has a combined length of less than 512 tokens. The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `[MASK]`. - In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace. - In the 10% remaining cases, the masked tokens are left as is. ### Pretraining The model was trained on 4 cloud TPUs in Pod configuration (16 TPU chips total) for one million steps with a batch size of 256. The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. 
## Evaluation results When fine-tuned on downstream tasks, this model achieves the following results: Glue test results: | Task | MNLI-(m/mm) | QQP | QNLI | SST-2 | CoLA | STS-B | MRPC | RTE | Average | |:----:|:-----------:|:----:|:----:|:-----:|:----:|:-----:|:----:|:----:|:-------:| | | 84.6/83.4 | 71.2 | 90.5 | 93.5 | 52.1 | 85.8 | 88.9 | 66.4 | 79.6 | ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-1810-04805, author = {Jacob Devlin and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova}, title = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language Understanding}, journal = {CoRR}, volume = {abs/1810.04805}, year = {2018}, url = {http://arxiv.org/abs/1810.04805}, archivePrefix = {arXiv}, eprint = {1810.04805}, timestamp = {Tue, 30 Oct 2018 20:39:56 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <a href="https://huggingface.co/exbert/?model=bert-base-uncased"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
DicoTiar/wisdomfiy
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
# `vocabtrimmer/xlm-v-base-trimmed-pt-15000-tweet-sentiment-pt` This model is a fine-tuned version of [/home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-pt-15000](https://huggingface.co//home/c.c2042013/lm-vocab-trimmer/ckpts/xlm-v-base-trimmed-pt-15000) on the [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (portuguese). The following metrics are computed on the `test` split of [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (portuguese). | | eval_f1_micro | eval_recall_micro | eval_precision_micro | eval_f1_macro | eval_recall_macro | eval_precision_macro | eval_accuracy | |---:|----------------:|--------------------:|-----------------------:|----------------:|--------------------:|-----------------------:|----------------:| | 0 | 55.17 | 55.17 | 55.17 | 53.43 | 55.17 | 53.71 | 55.17 | Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-pt-15000-tweet-sentiment-pt/raw/main/eval.json).
DiegoAlysson/opus-mt-en-ro-finetuned-en-to-ro
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
# Vocabulary Trimmed [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base): `vocabtrimmer/xlm-v-base-trimmed-fr-15000`

This model is a trimmed version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) produced by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress model size.
The following table shows a summary of the trimming process.

|                             | facebook/xlm-v-base | vocabtrimmer/xlm-v-base-trimmed-fr-15000 |
|:----------------------------|:--------------------|:-----------------------------------------|
| parameter_size_full         | 779,396,349         | 97,580,186                               |
| parameter_size_embedding    | 692,451,072         | 11,521,536                               |
| vocab_size                  | 901,629             | 15,002                                   |
| compression_rate_full       | 100.0               | 12.52                                    |
| compression_rate_embedding  | 100.0               | 1.66                                     |

The following table shows the parameters used to trim the vocabulary.

| language   | dataset                     | dataset_column   | dataset_name   | dataset_split   |   target_vocab_size |   min_frequency |
|:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:|
| fr         | vocabtrimmer/mc4_validation | text             | fr             | validation      |               15000 |               2 |
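As an illustrative sketch (not part of the trimming tool's documentation), the trimmed checkpoint can be loaded exactly like the original model, and its size sanity-checked against the figures above; exact counts may differ slightly depending on which head is included.

```python
from transformers import AutoTokenizer, AutoModelForMaskedLM

name = "vocabtrimmer/xlm-v-base-trimmed-fr-15000"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForMaskedLM.from_pretrained(name)

# Compare against the table above: vocab_size ~ 15,002, parameter_size_full ~ 97.6M.
print(len(tokenizer))
print(sum(p.numel() for p in model.parameters()))
```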
Dkwkk/Da
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-PixelCopterBoom
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 22.90 +/- 21.29
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**

This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DongHai/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
# Vocabulary Trimmed [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base): `vocabtrimmer/xlm-v-base-trimmed-fr-30000`

This model is a trimmed version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) produced by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress model size.
The following table shows a summary of the trimming process.

|                             | facebook/xlm-v-base | vocabtrimmer/xlm-v-base-trimmed-fr-30000 |
|:----------------------------|:--------------------|:-----------------------------------------|
| parameter_size_full         | 779,396,349         | 109,115,186                              |
| parameter_size_embedding    | 692,451,072         | 23,041,536                               |
| vocab_size                  | 901,629             | 30,002                                   |
| compression_rate_full       | 100.0               | 14.0                                     |
| compression_rate_embedding  | 100.0               | 3.33                                     |

The following table shows the parameters used to trim the vocabulary.

| language   | dataset                     | dataset_column   | dataset_name   | dataset_split   |   target_vocab_size |   min_frequency |
|:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:|
| fr         | vocabtrimmer/mc4_validation | text             | fr             | validation      |               30000 |               2 |
DongHyoungLee/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
# `vocabtrimmer/xlm-v-base-trimmed-pt-30000-tweet-sentiment-pt`

This model is a fine-tuned version of [vocabtrimmer/xlm-v-base-trimmed-pt-30000](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-pt-30000) on the [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) dataset (Portuguese).
The following metrics are computed on the `test` split of [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (Portuguese).

|    |   eval_f1_micro |   eval_recall_micro |   eval_precision_micro |   eval_f1_macro |   eval_recall_macro |   eval_precision_macro |   eval_accuracy |
|---:|----------------:|--------------------:|-----------------------:|----------------:|--------------------:|-----------------------:|----------------:|
|  0 |           64.71 |               64.71 |                  64.71 |           64.51 |               64.71 |                  64.57 |           64.71 |

Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-pt-30000-tweet-sentiment-pt/raw/main/eval.json).
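If you prefer not to use the `pipeline` helper, an equivalent illustrative sketch (not part of the original card) loads the tokenizer and model explicitly and reads the probabilities from the logits; as above, the label names come from the model's `id2label` config.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

name = "vocabtrimmer/xlm-v-base-trimmed-pt-30000-tweet-sentiment-pt"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

inputs = tokenizer("O atendimento foi péssimo.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = logits.softmax(dim=-1)[0]
for i, p in enumerate(probs.tolist()):
    print(model.config.id2label[i], round(p, 3))
```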
Waynehillsdev/Waynehills-STT-doogie-server
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
# `vocabtrimmer/xlm-v-base-trimmed-fr-30000-tweet-sentiment-fr`

This model is a fine-tuned version of [vocabtrimmer/xlm-v-base-trimmed-fr-30000](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-fr-30000) on the [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) dataset (French).
The following metrics are computed on the `test` split of [cardiffnlp/tweet_sentiment_multilingual](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual) (French).

|    |   eval_f1_micro |   eval_recall_micro |   eval_precision_micro |   eval_f1_macro |   eval_recall_macro |   eval_precision_macro |   eval_accuracy |
|---:|----------------:|--------------------:|-----------------------:|----------------:|--------------------:|-----------------------:|----------------:|
|  0 |           71.26 |               71.26 |                  71.26 |           71.15 |               71.26 |                  71.71 |           71.26 |

Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-trimmed-fr-30000-tweet-sentiment-fr/raw/main/eval.json).