modelId: stringlengths 4-81
tags: list
pipeline_tag: stringclasses (17 values)
config: dict
downloads: int64 0-59.7M
first_commit: timestamp[ns, tz=UTC]
card: stringlengths 51-438k
Dawit/DialogGPT-small-ironman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - mteb model-index: - name: all-mpnet-base-v2-negation results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 72.6268656716418 - type: ap value: 36.40585820220466 - type: f1 value: 67.06383995428979 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 85.11834999999999 - type: ap value: 79.72843246428603 - type: f1 value: 85.08938287851875 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.788000000000004 - type: f1 value: 37.40475118737949 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 45.73138953773995 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 39.13609863309245 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 65.56639026991134 - type: mrr value: 77.8122938926263 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 72.27098152643569 - type: cos_sim_spearman value: 71.13475338373253 - type: euclidean_pearson value: 70.48545151074218 - type: euclidean_spearman value: 69.49917394727082 - type: manhattan_pearson value: 69.2653740752147 - type: manhattan_spearman value: 68.59192435931085 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 84.7012987012987 - type: f1 value: 84.61766470772943 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 37.61314886948818 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 34.496442588205205 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 45.63 - type: f1 value: 40.24119129248194 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 74.73479999999999 - type: ap value: 68.80435332319863 - type: f1 value: 74.66014345440416 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.06429548563612 - type: f1 value: 92.91686969560733 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 78.19197446420428 - type: f1 value: 61.50020940946492 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.86684599865502 - type: f1 value: 72.11245795864379 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.53866845998655 - type: f1 value: 77.51746806908895 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.66744884855605 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.951900966550262 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 29.34485636178124 - type: mrr value: 30.118035109577022 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 47.14306531904168 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 51.59878183893005 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 78.5530506834234 - type: cos_sim_spearman value: 77.45787185404667 - type: euclidean_pearson value: 76.37727601604011 - type: euclidean_spearman value: 77.14250754925013 - type: manhattan_pearson value: 75.85855462882735 - type: manhattan_spearman value: 76.6223895689777 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 83.1019526956277 - type: cos_sim_spearman value: 72.98362332123834 - type: euclidean_pearson value: 78.42992808997602 - type: euclidean_spearman value: 70.79569301491145 - type: manhattan_pearson value: 77.96413528436207 - type: manhattan_spearman value: 70.34707852104586 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 85.09200805966644 - type: cos_sim_spearman value: 85.52497834636847 - type: euclidean_pearson value: 84.20407512505086 - type: euclidean_spearman value: 85.35640946044332 - type: manhattan_pearson value: 83.79425758102826 - type: manhattan_spearman value: 84.9531731481683 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 82.43419245577238 - type: cos_sim_spearman value: 79.87215923164575 - type: euclidean_pearson value: 80.99628882719712 - type: euclidean_spearman value: 79.2671186335978 - type: manhattan_pearson value: 80.47076166661054 - type: manhattan_spearman value: 78.82329686631051 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 84.67294508915346 - type: cos_sim_spearman value: 85.34528695616378 - type: euclidean_pearson value: 83.65270617275111 - type: euclidean_spearman value: 84.64456096952591 - type: manhattan_pearson value: 83.26416114783083 - type: manhattan_spearman value: 84.26944094512996 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 80.70172607906416 - type: cos_sim_spearman value: 81.96031310316046 - type: euclidean_pearson value: 82.34820192315314 - type: euclidean_spearman value: 82.72576940549405 - type: manhattan_pearson value: 81.93093910116202 - type: manhattan_spearman value: 82.25431799152639 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 90.43640731744911 - type: cos_sim_spearman value: 90.16343998541602 - type: euclidean_pearson value: 89.49834342254633 - type: euclidean_spearman value: 90.17304989919288 - type: manhattan_pearson value: 89.32424382015218 - type: manhattan_spearman value: 89.91884845996768 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 62.06205206393254 - type: cos_sim_spearman value: 60.920792876665885 - type: euclidean_pearson value: 60.49188637403393 - type: euclidean_spearman value: 60.73500415357452 - type: manhattan_pearson value: 59.94692152491976 - type: manhattan_spearman value: 60.215426858338994 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.78948820087687 - type: cos_sim_spearman value: 84.64531509697663 - type: euclidean_pearson value: 84.77264321816324 - type: euclidean_spearman value: 84.67485410196043 - type: manhattan_pearson value: 84.43100272264775 - type: manhattan_spearman value: 84.29254033404217 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 88.39411601972704 - type: mrr value: 96.49192583016112 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.55445544554455 - type: cos_sim_ap value: 84.82462858434408 - type: cos_sim_f1 value: 76.11464968152866 - type: cos_sim_precision value: 81.10859728506787 - type: cos_sim_recall value: 71.7 - type: dot_accuracy value: 99.48613861386139 - type: dot_ap value: 80.97278220281665 - type: dot_f1 value: 72.2914669223394 - type: dot_precision value: 69.42909760589319 - type: dot_recall value: 75.4 - type: euclidean_accuracy value: 99.56138613861386 - type: euclidean_ap value: 85.21566333946467 - type: euclidean_f1 value: 76.60239708181345 - type: euclidean_precision value: 79.97823721436343 - type: euclidean_recall value: 73.5 - type: manhattan_accuracy value: 99.55148514851486 - type: manhattan_ap value: 84.49960192851891 - type: manhattan_f1 value: 75.9681697612732 - type: manhattan_precision value: 80.90395480225989 - type: manhattan_recall value: 71.6 - type: max_accuracy value: 99.56138613861386 - type: max_ap value: 85.21566333946467 - type: max_f1 value: 76.60239708181345 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 49.33929838947165 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 31.523973661953686 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.22408767861519 - type: mrr value: 53.16279921059333 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 28.128173244098726 - type: cos_sim_spearman value: 30.149225143523662 - type: dot_pearson value: 24.322914168643386 - type: dot_spearman value: 26.38194545372431 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 67.6684 - type: ap value: 12.681984793717413 - type: f1 value: 51.97637585601529 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 58.44086021505377 - type: f1 value: 58.68058329615692 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 44.226944341054015 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.87488823985218 - type: cos_sim_ap value: 76.85283892335002 - type: cos_sim_f1 value: 70.42042042042041 - type: cos_sim_precision value: 66.96811042360781 - type: cos_sim_recall value: 74.24802110817942 - type: dot_accuracy value: 84.85426476724086 - type: dot_ap value: 70.77036812650887 - type: dot_f1 value: 66.4901577069184 - type: dot_precision value: 58.97488258117215 - type: dot_recall value: 76.2005277044855 - type: euclidean_accuracy value: 86.95833581689217 - type: euclidean_ap value: 77.05903224969623 - type: euclidean_f1 value: 70.75323419175432 - type: euclidean_precision value: 65.2979245704084 - type: euclidean_recall value: 77.20316622691293 - type: manhattan_accuracy value: 86.88084878106932 - type: manhattan_ap value: 76.95056209047733 - type: manhattan_f1 value: 70.61542203843348 - type: manhattan_precision value: 65.50090252707581 - type: manhattan_recall value: 76.59630606860158 - type: max_accuracy value: 86.95833581689217 - type: max_ap value: 77.05903224969623 - type: max_f1 value: 70.75323419175432 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.43870066363954 - type: cos_sim_ap value: 84.77197321507954 - type: cos_sim_f1 value: 76.91440595175472 - type: cos_sim_precision value: 75.11375311903713 - type: cos_sim_recall value: 78.80351093316908 - type: dot_accuracy value: 87.60624054022587 - type: dot_ap value: 83.16574114504616 - type: dot_f1 value: 75.5050226294293 - type: dot_precision value: 72.30953555571217 - type: dot_recall value: 78.99599630428088 - type: euclidean_accuracy value: 88.2951061435169 - type: euclidean_ap value: 84.28559058741602 - type: euclidean_f1 value: 76.7921146953405 - type: euclidean_precision value: 74.54334589736156 - type: euclidean_recall value: 79.1807822605482 - type: manhattan_accuracy value: 88.23883261536074 - type: manhattan_ap value: 84.20593815258039 - type: manhattan_f1 value: 76.74366281685916 - type: manhattan_precision value: 74.80263157894737 - type: manhattan_recall value: 78.78811210348013 - type: max_accuracy value: 88.43870066363954 - type: max_ap value: 84.77197321507954 - type: max_f1 value: 76.91440595175472 ---
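The card above carries only MTEB metadata, so here is a minimal usage sketch, assuming the checkpoint loads as a Sentence Transformers model; `<org>/all-mpnet-base-v2-negation` is a placeholder for the full repo id.

```python
# Minimal sketch, assuming a Sentence Transformers checkpoint;
# "<org>/all-mpnet-base-v2-negation" is a placeholder repo id.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("<org>/all-mpnet-base-v2-negation")
embeddings = model.encode(["I like this movie.", "I do not like this movie."])
# A negation-aware embedder should score this pair lower than a vanilla one.
print(util.cos_sim(embeddings[0], embeddings[1]))
```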
Daymarebait/Discord_BOT_RICK
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started A complete tutorial on training your first agent with ML-Agents and publishing it to the Hub is available in the Hugging Face Deep RL Course. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Find your model_id: cleth/ppo-SnowballTarget1 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀 For a local copy of the checkpoint, see the download sketch below.
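A sketch of downloading the checkpoint from step 2 for local use; `huggingface_hub` is an assumption here, and the official route remains the Space linked above.

```python
# Minimal sketch; huggingface_hub is an assumption, and the repo id
# is the model_id from step 2 above.
from huggingface_hub import snapshot_download

snapshot_download(repo_id="cleth/ppo-SnowballTarget1", local_dir="./downloads")
```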
Dazai/Ko
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit language: - pl tags: - llama - alpaca - lora - self-instruct --- This repo contains a low-rank adapter for LLaMA-7B trained on 55,125 generated (not translated!) [instructions](https://huggingface.co/datasets/chrisociepa/raw-self-generated-instructions-pl) in Polish. The training took almost 16 hours on a single RTX 4090 with the following hyperparameters: * Epochs: 10 (global steps: 4300) * batch_size: 128 * micro_batch_size: 8 * learning_rate: 0.0001 * cutoff_len: 2048 (effectively no cut off!) * lora_r: 8 * lora_alpha: 16 * lora_dropout: 0.05 * lora_target_modules: ['q_proj', 'k_proj', 'v_proj', 'o_proj'] * train_on_inputs: True * group_by_length: True This adapter allows the model to speak Polish more accurately than [Alpaca-Lora-7B](https://huggingface.co/tloen/alpaca-lora-7b). Instructions for running it can be found at https://github.com/tloen/alpaca-lora. ## Examples * **Instruction:** `Jaki jest najwyższy szczyt w Polsce?` Alpaca PL: `Najwyższy szczyt w Polsce to Rysy, osiągający wysokość 2499 m n.p.m.` Alpaca-Lora-7B: `W Polsce najwyższym szczytem jest Snieżnik o wysokości 1655 m n.p.m.` * **Instruction:** `Przetłumacz na angielski: Kiedy byłem małym chłopcem, lubiłem grać w piłkę nożną. Teraz jestem dorosły i lubie biegać.` Alpaca PL: `When I was a little boy, I used to play soccer. Now I'm an adult and I like running.` Alpaca-Lora-7B: `When I was a small boy, I loved playing soccer. Now I am an adult and I love running.` * **Instruction:** `Przetłumacz zdanie na język polski: We have a powerful tool like LLM but are still looking for equally powerful methods to control it.` Alpaca PL: `Tłumaczenie zdania na język polski: "Mamy silny narzędzie takie jak LLM, ale jeszcze nie znaleźliśmy równie silnych metod, aby go kontrolować."` Alpaca-Lora-7B: `When I was a small boy, I loved playing soccer. Now I am an adult and I love running.` * **Instruction:** `Co można robić w wolnym czasie?` Alpaca PL: `W wolnym czasie można zrobić wiele rzeczy, takich jak czytać książki, słuchać muzyki, oglądać filmy lub grać w gry planszowe. Można również spędzać czas z rodziną lub przyjaciółmi.` Alpaca-Lora-7B: `Co można robić w wolnym czasie? W wolnym czasie można rozmawiać z rodziną, czytać książki, czytać gazetę, czytać w internecie, grać w karty, grać w gry komputerowe, spędzać czas z przyjaciółmi, czytać książki, czytać gazetę, czytać w internecie, grać w karty, grać w gry komputerowe, spędzać czas z przy` * **Instruction:** `Wytłumacz 5 latkowi co to jest fizyka kwantowa` Alpaca PL: `Fizyka kwantowa to dziedzina nauki, która zajmuje się badaniem zjawisk zachodzących na poziomie kwantowym, czyli na poziomie atomów i cząstek. Jest to dziedzina nauki, która ma ogromny wpływ na wiele dziedzin nauki, takich jak fizyka, chemia, medycyna i inżynieria.` Alpaca-Lora-7B: `Quantum physics is the study of the behavior of matter and energy at the atomic and subatomic levels. It is based on the principles of quantum mechanics, which describe the behavior of particles and waves. Quantum physics is used to explain phenomena such as superposition, entanglement, and tunneling.` * **Instruction:** `Na czym polega faktoryzacja liczby?` Alpaca PL: `Faktoryzacja to proces dzielenia liczby na mniejsze liczby, takie jak 12 = 3 * 4.` Alpaca-Lora-7B: `Faktoryzacja jest procesem obliczeniowym polegającym na rozdzieleniu liczby na jej mniejsze części.` * **Instruction:** `Jaki jest wpływ palenia papierosów na zdrowie człowieka?` Alpaca PL: `Palenie papierosów ma negatywny wpływ na zdrowie człowieka, powodując choroby takie jak raka płuc, choroby serca i cukrzycę typu 2.` Alpaca-Lora-7B: `Palenie papierosów może mieć negatywny wpływ na zdrowie człowieka. Palenie papierosów powoduje zwiększenie ryzyka wystąpienia chorób układu oddechowego, takich jak raka płuc, choroby układu krążkowego, chorób serca, chorób układu krwionośnego, chorób układu nerwowego i chorób układu pokarmowego. Palenie papierosów powoduje również zwięks` * **Instruction:** `Jakie jest znaczenie Protistów?` Alpaca PL: `Protisty to grupa organizmów, które nie należą do zwierząt ani roślin. Są to organizmy jednokomórkowe lub wielokomórkowe, które żyją w różnych środowiskach, w tym w wodzie, na ziemi i w powietrzu.` Alpaca-Lora-7B: `Protistów znaczenie to "przeciwstawnych".` * **Instruction:** `Dokończ zdanie: W Szczebrzeszynie chrząszcz brzmi w ____` Alpaca PL: `W Szczebrzeszynie chrząszcz brzmi w lesie.` Alpaca-Lora-7B: `W Szczebrzeszynie chrząszcz brzmi w gwizdku.`
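A minimal inference sketch with PEFT, following the alpaca-lora setup linked above; the base-model path and `<this-adapter-repo>` id are placeholders, and the alpaca-lora prompt template is omitted for brevity.

```python
# Minimal sketch, assuming a local LLaMA-7B checkpoint;
# "<this-adapter-repo>" is a placeholder for this adapter's repo id.
import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base = LlamaForCausalLM.from_pretrained("path/to/llama-7b-hf", torch_dtype=torch.float16, device_map="auto")
tokenizer = LlamaTokenizer.from_pretrained("path/to/llama-7b-hf")
model = PeftModel.from_pretrained(base, "<this-adapter-repo>")

inputs = tokenizer("Jaki jest najwyższy szczyt w Polsce?", return_tensors="pt").to(base.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```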
Dazai/Ok
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Are you wondering what a slot is? If you search the internet, you will find that PG SLOT is a type of gambling machine: a rectangular cabinet with a lever, similar to a car's gear stick, used to start the game, and a CRT-style screen that displays the gameplay. In the old days, slot-machine symbols were usually just a handful of fruit images, and the cabinets were found in casinos, entertainment venues, pubs, bars, and restaurants. Today, slots have become online slot games, a new form of online betting popular with more than a million players, after the era of physical Slot Machine cabinets faded as online games took their place. Whether fish-shooting games or online casino games, these can be played anywhere, at any time, which is far more convenient than going out to play on a physical cabinet, and eventually the slot-machine cabinet disappeared from the world of gambling. The distinctive character of slots, however, gave rise to online slot games in their place, and their popularity has kept growing until it overtook fish-shooting and online casino games, thanks to a format that is easy to play while preserving the identity of the original machines. As technology has advanced rapidly, online slots have been developed into attractive, eye-catching 3D games, and many studios now release online slot games into the online betting world. <p>► <a href="https://golden678.com/" rel="noopener nofollow">Try it free</a></p> The PGSLOT studio is one of the leading producers of online slot games and is likewise popular with a large number of players, and today there are websites that offer PGSLOT games to players who enjoy online betting and take a special interest in online slots. The PGSLOT.TO website offers more than 250 popular online slot games from PGSLOT, each bringing its own style and variety so players can pick and bet on many favorites. One example from our site is Candy Bonanza, a PG demo slot with bright, colorful candy and sweets imagery that stands out. Beyond its appealing theme, the game includes symbols that multiply winnings, giving players a chance at the top prize, and its bonus awards up to 10 free spins; in addition, if a player lands four matching symbols (the candy-wrapper symbol) in a row on the reels, they receive one more free spin, making the game's top prize easier to reach. And Candy Bonanza is far from the only one: PGSLOT.TO offers many more slot games from PGSLOT to choose from.
DecafNosebleed/DialoGPT-small-ScaraBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: HASAN55/bert-finetuned-for-uncased results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # HASAN55/bert-finetuned-for-uncased This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.0058 - Train End Logits Accuracy: 0.7259 - Train Start Logits Accuracy: 0.6833 - Validation Loss: 0.9945 - Validation End Logits Accuracy: 0.7254 - Validation Start Logits Accuracy: 0.6994 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 11064, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.0058 | 0.7259 | 0.6833 | 0.9945 | 0.7254 | 0.6994 | 0 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
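The start/end-logit metrics above indicate an extractive question-answering head; a minimal inference sketch, assuming the checkpoint is published under the id used in this card:

```python
from transformers import pipeline

# Assumes the checkpoint is available as "HASAN55/bert-finetuned-for-uncased",
# the name used in this card.
qa = pipeline("question-answering", model="HASAN55/bert-finetuned-for-uncased")
print(qa(question="Where is the Eiffel Tower located?",
         context="The Eiffel Tower is located in Paris, France."))
```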
DecafNosebleed/scarabot-model
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 504.00 +/- 100.12 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga FranEnguix -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga FranEnguix -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga FranEnguix ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Declan/Breitbart_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: spam-classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # spam-classifier This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0539 - Accuracy: 0.9914 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 262 | 0.0424 | 0.9899 | | 0.053 | 2.0 | 524 | 0.0473 | 0.9914 | | 0.053 | 3.0 | 786 | 0.0497 | 0.9907 | | 0.0062 | 4.0 | 1048 | 0.0522 | 0.9914 | | 0.0062 | 5.0 | 1310 | 0.0539 | 0.9914 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
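A minimal inference sketch for the classifier above; `<user>/spam-classifier` is a placeholder for the full repo id.

```python
from transformers import pipeline

# "<user>/spam-classifier" is a placeholder for the full repo id.
classifier = pipeline("text-classification", model="<user>/spam-classifier")
print(classifier("Congratulations! You've won a free cruise. Reply WIN to claim."))
```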
Declan/Breitbart_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: TestZee/mt5-small-finetuned-mt5-Large-English-Test results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # TestZee/mt5-small-finetuned-mt5-Large-English-Test This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 4.0827 - Validation Loss: 2.5454 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5.6e-05, 'decay_steps': 3512, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 9.1600 | 3.1254 | 0 | | 5.3014 | 2.7890 | 1 | | 4.6548 | 2.6661 | 2 | | 4.3133 | 2.5973 | 3 | | 4.0827 | 2.5454 | 4 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
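Since the card was generated from a Keras callback, a minimal TensorFlow inference sketch might look as follows; the input text is a placeholder, and the downstream task is not stated in the card.

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

repo = "TestZee/mt5-small-finetuned-mt5-Large-English-Test"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForSeq2SeqLM.from_pretrained(repo)

inputs = tokenizer("The quick brown fox jumps over the lazy dog.", return_tensors="tf")  # placeholder input
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```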
Declan/Breitbart_model_v7
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep5_lr2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep5_lr2 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0922 - Precision: 0.8259 - Recall: 0.8778 - F1: 0.8511 - Accuracy: 0.9740 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.0878 | 0.7978 | 0.8576 | 0.8266 | 0.9708 | | 0.1112 | 2.0 | 934 | 0.0861 | 0.8072 | 0.8780 | 0.8411 | 0.9718 | | 0.0746 | 3.0 | 1401 | 0.0867 | 0.8212 | 0.8756 | 0.8475 | 0.9736 | | 0.0586 | 4.0 | 1868 | 0.0901 | 0.8239 | 0.8780 | 0.8501 | 0.9737 | | 0.0539 | 5.0 | 2335 | 0.0922 | 0.8259 | 0.8778 | 0.8511 | 0.9740 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
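A minimal token-classification sketch for the NER model above; `<user>/BERT_ep5_lr2` is a placeholder for the full repo id, and the example sentence reflects the Spanish EHR domain of the base model.

```python
from transformers import pipeline

# "<user>/BERT_ep5_lr2" is a placeholder for the full repo id.
ner = pipeline("token-classification", model="<user>/BERT_ep5_lr2", aggregation_strategy="simple")
print(ner("Paciente de 65 años con antecedentes de diabetes mellitus tipo 2."))
```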
Declan/Breitbart_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - bert - adapter-transformers - adapterhub:reginaboateng/cleaned_pubmedqa datasets: - pubmedqa --- # Adapter `reginaboateng/pubmedqa-adapter` for allenai/scibert_scivocab_uncased An [adapter](https://adapterhub.ml) for the `allenai/scibert_scivocab_uncased` model that was trained on the [reginaboateng/cleaned_pubmedqa](https://adapterhub.ml/explore/reginaboateng/cleaned_pubmedqa/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library. ## Usage First, install `adapter-transformers`: ``` pip install -U adapter-transformers ``` _Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_ Now, the adapter can be loaded and activated like this: ```python from transformers import AutoAdapterModel model = AutoAdapterModel.from_pretrained("allenai/scibert_scivocab_uncased") adapter_name = model.load_adapter("reginaboateng/pubmedqa-adapter", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
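After loading, a classification forward pass might look like the sketch below; the question/abstract pairing is an assumed input format for PubMedQA-style classification.

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
# `model` is the AutoAdapterModel from the snippet above, with the adapter active.
inputs = tokenizer("Does aspirin reduce cardiovascular risk?",
                   "We studied 1,200 patients over five years ...",
                   return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.argmax(dim=-1))
```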
Declan/Breitbart_modelv7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m base_model: /root/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA text2image fine-tuning - raven316/pokemon-lora These are LoRA adaptation weights for /root/stable-diffusion-v1-5. The weights were fine-tuned on the lambdalabs/pokemon-blip-captions dataset. You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
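A minimal generation sketch with diffusers; the public runwayml/stable-diffusion-v1-5 id is an assumption standing in for the local base-model path used during training.

```python
# Minimal sketch; the public base-model id stands in for the local
# /root/stable-diffusion-v1-5 path named in the card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs("raven316/pokemon-lora")  # load the LoRA attention weights
image = pipe("a green pokemon with big eyes", num_inference_steps=30).images[0]
image.save("pokemon.png")
```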
Declan/CNN_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: bart-large-cnn-samsum-ElectrifAi_v12 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-samsum-ElectrifAi_v12 This model is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9357 - Rouge1: 52.8096 - Rouge2: 30.6135 - Rougel: 40.9225 - Rougelsum: 48.0235 - Gen Len: 80.6 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 11 | 2.1692 | 50.8653 | 25.2603 | 37.6053 | 46.5156 | 86.0 | | No log | 2.0 | 22 | 1.9852 | 49.1764 | 25.9935 | 38.3519 | 44.5786 | 75.4 | | No log | 3.0 | 33 | 1.9357 | 52.8096 | 30.6135 | 40.9225 | 48.0235 | 80.6 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cu117 - Datasets 2.8.0 - Tokenizers 0.13.2
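A minimal dialogue-summarization sketch for the model above; `<user>/bart-large-cnn-samsum-ElectrifAi_v12` is a placeholder for the full repo id.

```python
from transformers import pipeline

# "<user>/bart-large-cnn-samsum-ElectrifAi_v12" is a placeholder repo id.
summarizer = pipeline("summarization", model="<user>/bart-large-cnn-samsum-ElectrifAi_v12")
dialogue = "Anna: Are we still on for lunch?\nTom: Yes, 12:30 at the usual place.\nAnna: Perfect, see you there."
print(summarizer(dialogue)[0]["summary_text"])
```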
Declan/CNN_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer model-index: - name: ec-biogpt-noised-pubmed-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ec-biogpt-noised-pubmed-v2 This model is a fine-tuned version of [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2703 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 10 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.1503 | 0.11 | 500 | 1.3369 | | 1.3766 | 0.21 | 1000 | 1.2721 | | 1.3523 | 0.32 | 1500 | 1.2516 | | 1.3123 | 0.43 | 2000 | 1.2394 | | 1.1954 | 0.54 | 2500 | 1.2265 | | 1.226 | 0.64 | 3000 | 1.2182 | | 1.1269 | 0.75 | 3500 | 1.2118 | | 1.212 | 0.86 | 4000 | 1.2053 | | 1.3253 | 0.96 | 4500 | 1.1984 | | 1.0722 | 1.07 | 5000 | 1.2016 | | 1.1208 | 1.18 | 5500 | 1.2009 | | 1.132 | 1.28 | 6000 | 1.1992 | | 1.1228 | 1.39 | 6500 | 1.1967 | | 1.1529 | 1.5 | 7000 | 1.1918 | | 1.0342 | 1.61 | 7500 | 1.1916 | | 1.0881 | 1.71 | 8000 | 1.1889 | | 1.084 | 1.82 | 8500 | 1.1852 | | 1.1409 | 1.93 | 9000 | 1.1807 | | 0.9794 | 2.03 | 9500 | 1.2098 | | 0.9821 | 2.14 | 10000 | 1.2146 | | 0.9695 | 2.25 | 10500 | 1.2096 | | 0.9866 | 2.35 | 11000 | 1.2088 | | 1.0305 | 2.46 | 11500 | 1.2059 | | 0.9532 | 2.57 | 12000 | 1.2060 | | 0.9978 | 2.68 | 12500 | 1.2041 | | 1.0013 | 2.78 | 13000 | 1.2006 | | 1.0401 | 2.89 | 13500 | 1.2023 | | 1.0899 | 3.0 | 14000 | 1.1988 | | 0.8229 | 3.1 | 14500 | 1.2410 | | 0.8598 | 3.21 | 15000 | 1.2420 | | 0.9295 | 3.32 | 15500 | 1.2414 | | 0.8477 | 3.43 | 16000 | 1.2386 | | 0.9302 | 3.53 | 16500 | 1.2382 | | 0.8284 | 3.64 | 17000 | 1.2374 | | 0.8242 | 3.75 | 17500 | 1.2410 | | 0.8422 | 3.85 | 18000 | 1.2346 | | 0.8742 | 3.96 | 18500 | 1.2362 | | 0.798 | 4.07 | 19000 | 1.2667 | | 0.7821 | 4.17 | 19500 | 1.2701 | | 0.7788 | 4.28 | 20000 | 1.2714 | | 0.7701 | 4.39 | 20500 | 1.2702 | | 0.7348 | 4.5 | 21000 | 1.2722 | | 0.762 | 4.6 | 21500 | 1.2705 | | 0.7385 | 4.71 | 22000 | 1.2705 | | 0.7837 | 4.82 | 22500 | 1.2695 | | 0.8371 | 4.92 | 23000 | 1.2703 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
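A minimal generation sketch for the fine-tuned BioGPT above; `<user>/ec-biogpt-noised-pubmed-v2` is a placeholder for the full repo id.

```python
from transformers import pipeline

# "<user>/ec-biogpt-noised-pubmed-v2" is a placeholder for the full repo id.
generator = pipeline("text-generation", model="<user>/ec-biogpt-noised-pubmed-v2")
print(generator("Recent advances in mRNA vaccine design", max_new_tokens=50)[0]["generated_text"])
```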
Declan/CNN_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 instance_prompt: photo of a sks plushy tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - svdiff inference: true --- # SVDiff-pytorch - mshing/svdiff_kumamon_example These are SVDiff weights for runwayml/stable-diffusion-v1-5. The weights were trained on photo of a sks plushy using [DreamBooth](https://dreambooth.github.io/).
Declan/ChicagoTribune_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - billsum metrics: - rouge model-index: - name: my_awesome_billsum_model results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: billsum type: billsum config: default split: ca_test args: default metrics: - name: Rouge1 type: rouge value: 0.1368 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_billsum_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the billsum dataset. It achieves the following results on the evaluation set: - Loss: 2.5366 - Rouge1: 0.1368 - Rouge2: 0.0483 - Rougel: 0.1123 - Rougelsum: 0.1123 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 62 | 2.8280 | 0.1322 | 0.0403 | 0.1094 | 0.1095 | 19.0 | | No log | 2.0 | 124 | 2.6181 | 0.1371 | 0.0468 | 0.1121 | 0.1118 | 19.0 | | No log | 3.0 | 186 | 2.5548 | 0.1376 | 0.0484 | 0.1129 | 0.1129 | 19.0 | | No log | 4.0 | 248 | 2.5366 | 0.1368 | 0.0483 | 0.1123 | 0.1123 | 19.0 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
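A minimal usage sketch for the billsum summarizer above; `<user>/my_awesome_billsum_model` is a placeholder, and the `summarize:` prefix follows the t5-small convention.

```python
from transformers import pipeline

# "<user>/my_awesome_billsum_model" is a placeholder for the full repo id;
# t5 checkpoints expect a "summarize: " task prefix.
summarizer = pipeline("summarization", model="<user>/my_awesome_billsum_model")
text = "summarize: The bill amends the state education code to expand eligibility for after-school program grants ..."
print(summarizer(text, max_length=40)[0]["summary_text"])
```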
Declan/ChicagoTribune_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 608.00 +/- 131.50 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga justinsiow -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga justinsiow -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga justinsiow ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Declan/ChicagoTribune_model_v7
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: HF_DRL_U4_pixelcopter_reinforcepg_v2
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 12.00 +/- 13.55
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**

This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Declan/ChicagoTribune_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
license: apache-2.0
---

# Introduction

This repo contains a torchscript model of `stt_en_conformer_ctc_medium` from NeMo. See https://registry.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_conformer_ctc_medium

The following code is used to obtain `model.onnx` and `tokens.txt`:

```python3
import nemo.collections.asr as nemo_asr

m = nemo_asr.models.EncDecCTCModelBPE.from_pretrained('stt_en_conformer_ctc_medium')
m.export('model.onnx')

with open('tokens.txt', 'w') as f:
    for i, s in enumerate(m.decoder.vocabulary):
        f.write(f"{s} {i}\n")
    f.write(f"<blk> {i+1}\n")
```
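To sanity-check the exported files, one can open `model.onnx` with `onnxruntime` and rebuild the id-to-token map from `tokens.txt`. This is only a quick inspection sketch, not the full NeMo feature-extraction and CTC-decoding pipeline:

```python
import onnxruntime as ort

# Load the exported graph and inspect the expected inputs/outputs
# before wiring up feature extraction and CTC decoding.
sess = ort.InferenceSession("model.onnx")
print([(i.name, i.shape) for i in sess.get_inputs()])
print([(o.name, o.shape) for o in sess.get_outputs()])

# tokens.txt maps CTC output ids back to subword units (last id is the blank).
id2tok = {}
with open("tokens.txt") as f:
    for line in f:
        tok, idx = line.rsplit(maxsplit=1)
        id2tok[int(idx)] = tok
print(len(id2tok), "tokens")
```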
Declan/FoxNews_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - adapter-transformers - bert - adapterhub:biomedical/pubmedqa datasets: - pubmedqa --- # Adapter `reginaboateng/pubmedqa_adapter` for allenai/scibert_scivocab_uncased An [adapter](https://adapterhub.ml) for the `allenai/scibert_scivocab_uncased` model that was trained on the [biomedical/pubmedqa](https://adapterhub.ml/explore/biomedical/pubmedqa/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library. ## Usage First, install `adapter-transformers`: ``` pip install -U adapter-transformers ``` _Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_ Now, the adapter can be loaded and activated like this: ```python from transformers import AutoAdapterModel model = AutoAdapterModel.from_pretrained("allenai/scibert_scivocab_uncased") adapter_name = model.load_adapter("reginaboateng/pubmedqa_adapter", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
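From there, inference works like any sequence-classification model. A short hedged sketch building on the loading snippet above; the index-to-label mapping depends on how the prediction head was trained (e.g. yes/no/maybe for PubMedQA):

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
inputs = tokenizer("Does metformin reduce cardiovascular risk?", return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # `model` from the loading snippet above
print(logits.argmax(dim=-1).item())  # index into the prediction head's label set
```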
Declan/FoxNews_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
license: apache-2.0
---

# Introduction

This repo contains a torchscript model of `stt_en_conformer_ctc_large` from NeMo. See https://registry.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_conformer_ctc_large

The following code is used to obtain `model.onnx` and `tokens.txt`:

```python3
import nemo.collections.asr as nemo_asr

m = nemo_asr.models.EncDecCTCModelBPE.from_pretrained('stt_en_conformer_ctc_large')
m.export('model.onnx')

with open('tokens.txt', 'w') as f:
    for i, s in enumerate(m.decoder.vocabulary):
        f.write(f"{s} {i}\n")
    f.write(f"<blk> {i+1}\n")
```
Declan/FoxNews_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
# LoRA Model Usage Notes

## Celestine Lucullus

2023/2/25 update: pruned tags to make the description more accurate, and adjusted rank to 32 to balance file size and quality.

Introduction: a LoRA model of Celestine Lucullus from Kuroinu. The 784 MB VAEs (NAI, Orangemix, Anything, Counterfeit) are recommended, and weights of 0.6~0.8 work well. It's recommended to use it together with any Olga Discordia LoRA to generate high-quality NSFW images. The preview images were generated using the latest version of YesMix.

********

## Adventurers

Thank you all for downloading and reviewing this LoRA! I currently have plans to work on the next version, but there are still many challenges to overcome. In addition, I found that certain themes can sometimes work better as standalone LoRAs rather than mixed LoRAs. As a pre-practice for the updated version, I have created some standalone LoRAs, including:

- Angelic Wairrors: for divine armored characters such as Valkyrie, Priestess, Paladin, Knight, and Angel.
- Not Horny Pirate: for classic pirates.
- Norse Viking: for Nordic-style Viking Warriors and Berserkers.
- Non-Hentai Elf: for traditional fantasy elf characters such as Ranger, Warrior, Druid, and Priestess.

Thank you once again for your patience and support!

******

## Hipoly 3D Model

High-poly 3D Model LoRA

This is a LoRA trained on high-polygon 3D model images. It can provide clean, high-resolution skin and hair materials, as well as detailed clothing elements.

For those interested, I have compiled the technical insights gained during the training of ver.2 in the following article: https://note.com/takumi__ncr/n/n21016c358ea5

Changes in 2.0:
- Increased the number of training images
- Increased the training resolution (ver.1: 768, ver.2: 896)
- Revised the tagging, and removed the trigger word as well
- Reviewed the training parameters

Improvements in 2.0:
- It can reproduce a more 3D-like texture and stereoscopic effect than ver.1
- The LoRA can be applied without a trigger word
- The style can be controlled using the 3d and realistic tags: added to the Positive Prompt, they enhance the 3D feel; added to the Negative Prompt, they add details such as clothing while maintaining the model's art style
- The issue of tags such as intricate, detailed eroding the model's art style has been alleviated

Additional information: the effect of the LoRA was confirmed using the following custom merged models of mine:
- Alstroemeria Mix
- Bougainvillea Mix
- Chrysanthemum Mix

(A Japanese version of the notes above, with the same content, accompanies the original card; the corresponding Japanese write-up is at https://note.com/takumi__ncr/n/n2fb9d265ffa9.)

******

## Thai university

This LoRA is made to produce a white shirt and black skirt close to the female Thai university uniform. The base model was trained from Chilloutmix-Ni.

Use: `black skirt` for the normal university skirt, but if you want the tight one I suggest `black tight skirt` or `black pencil skirt`.
Declan/HuffPost_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
license: mit
tags:
- translation
- generated_from_trainer
metrics:
- bleu
model-index:
- name: m2m100_418M-english-somali-v2.0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# m2m100_418M-english-somali-v2.0

This model is a fine-tuned version of [facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) on an English-Somali parallel dataset (not specified in this card). It achieves the following results on the evaluation set:
- Loss: 0.9011
- Bleu: 53.2731

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step   | Validation Loss | Bleu    |
|:-------------:|:-----:|:------:|:---------------:|:-------:|
| 1.224         | 1.0   | 41980  | 1.1302          | 31.5463 |
| 0.913         | 2.0   | 83960  | 0.9315          | 37.3096 |
| 0.6626        | 3.0   | 125940 | 0.8554          | 42.1830 |
| 0.5521        | 4.0   | 167920 | 0.8173          | 45.8223 |
| 0.4004        | 5.0   | 209900 | 0.8154          | 47.8919 |
| 0.3457        | 6.0   | 251880 | 0.8229          | 49.8002 |
| 0.2521        | 7.0   | 293860 | 0.8455          | 51.3589 |
| 0.1889        | 8.0   | 335840 | 0.8653          | 52.3174 |
| 0.1396        | 9.0   | 377820 | 0.8905          | 52.9441 |
| 0.1059        | 10.0  | 419800 | 0.9011          | 53.2731 |

### Framework versions

- Transformers 4.27.4
- Pytorch 1.12.1
- Datasets 2.11.0
- Tokenizers 0.13.3
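Since the card gives no usage snippet, here is a hedged sketch with the standard M2M100 API. The repo id is assumed from the model name above; adjust the namespace as needed:

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Repo id assumed from the card's model name -- adjust the namespace as needed.
model_id = "<user>/m2m100_418M-english-somali-v2.0"
tokenizer = M2M100Tokenizer.from_pretrained(model_id)
model = M2M100ForConditionalGeneration.from_pretrained(model_id)

tokenizer.src_lang = "en"  # source language: English
encoded = tokenizer("How are you today?", return_tensors="pt")
# Force the decoder to start with the Somali language token.
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("so"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```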
Declan/HuffPost_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards {} --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Declan/HuffPost_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - PavelDanek/autotrain-data-s2gsummarize co2_eq_emissions: emissions: 15.760105221870123 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 47615116641 - CO2 Emissions (in grams): 15.7601 ## Validation Metrics - Loss: 2.577 - Rouge1: 14.976 - Rouge2: 2.957 - RougeL: 14.679 - RougeLsum: 14.859 - Gen Len: 18.875 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/PavelDanek/autotrain-s2gsummarize-47615116641 ```
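Other AutoTrain cards in this dump also show a Python variant; an equivalent seq2seq sketch for this model (untested, but using only standard `transformers` calls) would be:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "PavelDanek/autotrain-s2gsummarize-47615116641"
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
summary_ids = model.generate(**inputs, max_new_tokens=48)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```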
Declan/NPR_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: marimurta/poca-SoccerTwos
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: HFRLu4_CartPole
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 500.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **CartPole-v1**

This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Declan/NPR_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
library_name: ml-agents
tags:
- Pyramids
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: cleth/ppo-PyramidsRND
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
Declan/WallStreetJournal_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.0+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
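The card omits an inference example; a minimal sketch with the image-classification pipeline follows. The repo id is a placeholder, and the label set is whatever the imagefolder classes were:

```python
from transformers import pipeline

# Placeholder repo id -- substitute the published location of this fine-tune.
classifier = pipeline("image-classification", model="<user>/vit-base-patch16-224-finetuned-flower")

# Accepts a local path or an image URL; labels come from the training folders.
print(classifier("daisy.jpg"))
```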
DeskDown/MarianMixFT_en-ms
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-fine-tuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5678267214677118 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-fine-tuned-cola This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8424 - Matthews Correlation: 0.5678 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.4779 | 1.0 | 1069 | 0.6219 | 0.4808 | | 0.3375 | 2.0 | 2138 | 0.6739 | 0.5705 | | 0.1886 | 3.0 | 3207 | 0.8424 | 0.5678 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
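Since usage is left as "more information needed", here is a hedged sketch. CoLA is a binary linguistic-acceptability task, so the head emits two labels:

```python
from transformers import pipeline

# Placeholder repo id -- replace with the published location of this checkpoint.
classifier = pipeline("text-classification", model="<user>/bert-fine-tuned-cola")

print(classifier("The book was read by the whole class."))  # should score as acceptable
print(classifier("The book read by class whole the."))      # should score as unacceptable
```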
DeskDown/MarianMixFT_en-th
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
license: creativeml-openrail-m
---

Created by https://civitai.com/user/AreThoseLevel4Plates. All credits reserved to the creator. Uploaded only for personal use in Colab.
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebooks;
# it downloads the pickled checkpoint from the Hub and unpickles it.
model = load_from_hub(repo_id="mfidabel/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
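A short greedy-rollout sketch, assuming the pickled dict follows the Deep RL course convention of storing the table under a `qtable` key, and the gym 0.26+ reset/step API (older gym versions return a bare observation and a 4-tuple instead):

```python
import numpy as np

# Assumes the course's checkpoint layout: {"env_id": ..., "qtable": ...}.
state, info = env.reset()
terminated = truncated = False
total_reward = 0
while not (terminated or truncated):
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, terminated, truncated, info = env.step(action)
    total_reward += reward
print(total_reward)  # 1.0 on a successful FrozenLake episode
```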
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
license: apache-2.0
tags:
- generated_from_trainer
- relation-extraction
metrics:
- rouge
model-index:
- name: t5-base-DreamBank-Generation-Act-Char
  results: []
language:
- en
inference:
  parameters:
    max_length: 128
widget:
- text: >-
    I was skating on the outdoor ice pond that used to be across the street
    from my house. I was not alone, but I did not recognize any of the other
    people who were skating around. I went through my whole repertoire of
    jumps, spires, and steps-some of which I can do and some of which I'm not
    yet sure of. They were all executed flawlessly-some I repeated, some I did
    only once. I seemed to know that if I went into competition, I would be
    sure of coming in third because there were only three contestants. Up to
    that time I hadn't considered it because I hadn't thought I was good
    enough, but now since everything was going so well, I decided to enter.
  example_title: Dream 1
- text: >-
    I was talking on the telephone to the father of an old friend of mine (boy,
    21 years old). We were discussing the party the Saturday night before to
    which I had invited his son as a guest. I asked him if his son had a good
    time at the party. He told me not to tell his son that he had told me, but
    that he had had a good time, except he was a little surprised that I had
    acted the way I did.
  example_title: Dream 2
- text: I was walking alone with my dog in a forest.
  example_title: Dream 3
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-base-DreamBank-Generation-Act-Char

This model is a fine-tuned version of [DReAMy-lib/t5-base-DreamBank-Generation-NER-Char](https://huggingface.co/DReAMy-lib/t5-base-DreamBank-Generation-NER-Char) on the DreamBank dataset. The uploaded model contains the weights of the best-performing model (see table below), tuned to annotate a given dream report according to the [Hall and Van de Castle Activity feature](https://dreams.ucsc.edu/Coding/activities.html).

## Model description

The model is trained end-to-end using a text2text solution to annotate dream reports following the Activity feature from the Hall and Van de Castle scoring framework. Given a report, the model generates texts of the form `(initialiser : activity type : receiver)`. For those cases where `initialiser` and `receiver` are the same entity, the output will follow the `(initialiser : alone activity type : none)` setting.
## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | No log | 1.0 | 49 | 0.3674 | 0.4008 | 0.3122 | 0.3821 | 0.3812 | | No log | 2.0 | 98 | 0.3200 | 0.4240 | 0.3433 | 0.4130 | 0.4121 | | No log | 3.0 | 147 | 0.2845 | 0.4591 | 0.3883 | 0.4459 | 0.4455 | | No log | 4.0 | 196 | 0.2508 | 0.4614 | 0.3930 | 0.4504 | 0.4497 | | No log | 5.0 | 245 | 0.2632 | 0.4614 | 0.3929 | 0.4467 | 0.4459 | | No log | 6.0 | 294 | 0.2688 | 0.4706 | 0.4036 | 0.4537 | 0.4534 | | No log | 7.0 | 343 | 0.2790 | 0.4682 | 0.4043 | 0.4559 | 0.4556 | | No log | 8.0 | 392 | 0.2895 | 0.4670 | 0.3972 | 0.4529 | 0.4534 | | No log | 9.0 | 441 | 0.3058 | 0.4708 | 0.4040 | 0.4576 | 0.4572 | | No log | 10.0 | 490 | 0.3169 | 0.4690 | 0.4001 | 0.4547 | 0.4544 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.5.1 - Tokenizers 0.12.1
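For reference, a hedged inference sketch: the repo id is assumed to live under the DReAMy-lib namespace like its base model, and the output follows the `(initialiser : activity type : receiver)` format described above:

```python
from transformers import pipeline

# Repo id assumed from the card's naming -- adjust if the model is hosted elsewhere.
annotator = pipeline("text2text-generation",
                     model="DReAMy-lib/t5-base-DreamBank-Generation-Act-Char")

report = "I was walking alone with my dog in a forest."
print(annotator(report, max_length=128)[0]["generated_text"])
```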
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### Drachenlord-x-Protogen-Dreambooth

Sample prompt: modelshoot style, (extremely detailed CG unity 8k wallpaper), ((((full body portrait)))) photo of a (((very morbidly obese, fat lumps, wounds, diabetes))) (((drachenlord))), portrait as (((Jesus))), the most beautiful portrait in the world, professional majestic oil painting by Ed Blinkey, Atey Ghailan, Studio Ghibli, by Jeremy Mann, Greg Manchess, Antonio Moro, trending on ArtStation, trending on CGSociety, Intricate, High Detail, Sharp focus, dramatic, photorealistic painting art by midjourney and greg rutkowski

Sample negative prompt: nude, naked, 3d, 3d render, b&w, bad anatomy, bad anatomy, bad anatomy, bad art, bad art, bad proportions, blurry, blurry, blurry, body out of frame, canvas frame, cartoon, cloned face, close up, cross-eye, deformed, deformed, deformed, disfigured, disfigured, disfigured, duplicate, extra arms, extra arms, extra fingers, extra legs, extra legs, extra limbs, extra limbs, extra limbs, extra limbs, fused fingers, gross proportions, long neck, malformed limbs, missing arms, missing legs, morbid, mutated, mutated hands, mutated hands, mutation, mutation, mutilated, out of frame, out of frame, out of frame, Photoshop, poorly drawn face, poorly drawn face, poorly drawn feet, poorly drawn hands, poorly drawn hands, tiling, too many fingers, video game, weird colors, (((headphones, microphone, glasses, bald)))

Sample pictures of this concept:

![0](https://huggingface.co/Jasm1neTea/drachenlord-x-protogen-dreambooth/resolve/main/sample_images/grid-0020.png)

Model version: Stable Diffusion 1.5

Have fun with it, and Meddl On!
Devmapall/paraphrase-quora
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
3
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3-baseline
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.42 +/- 2.72
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebooks;
# it downloads the pickled checkpoint from the Hub and unpickles it.
model = load_from_hub(repo_id="mfidabel/q-Taxi-v3-baseline", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
DevsIA/Devs_IA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-07T15:43:39Z
---
library_name: stable-baselines3
tags:
- PandaReachDense-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaReachDense-v2
      type: PandaReachDense-v2
    metrics:
    - type: mean_reward
      value: -2.44 +/- 0.98
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **PandaReachDense-v2**

This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch; the original card left this as a TODO, so the repo id and filename below are placeholders for this repository's actual checkpoint:

```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# Placeholder repo id / filename -- point these at this repository's checkpoint.
checkpoint = load_from_hub(repo_id="<user>/a2c-PandaReachDense-v2",
                           filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)
```
DheerajPranav/Dialo-GPT-Rick-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-PixelCopter
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 63.00 +/- 50.32
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**

This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Dilmk2/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - ru license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Medium Russian results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 ru type: mozilla-foundation/common_voice_11_0 config: ru split: test args: ru metrics: - type: wer value: 7.562437929892964 name: Wer - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ru_ru split: test metrics: - type: wer value: 10.92 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium Russian This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0 ru dataset. It achieves the following results on the evaluation set: - Loss: 0.2253 - Wer: 7.5624 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.1578 | 0.1 | 1000 | 0.1662 | 8.8290 | | 0.045 | 1.08 | 2000 | 0.1748 | 8.9148 | | 0.0176 | 2.06 | 3000 | 0.1889 | 8.7848 | | 0.0104 | 3.04 | 4000 | 0.1922 | 8.4354 | | 0.0051 | 4.02 | 5000 | 0.2034 | 8.1865 | | 0.0047 | 4.12 | 6000 | 0.2012 | 8.0455 | | 0.0018 | 5.1 | 7000 | 0.2117 | 7.6237 | | 0.0004 | 6.08 | 8000 | 0.2177 | 7.6078 | | 0.0003 | 7.06 | 9000 | 0.2244 | 7.6262 | | 0.0002 | 8.04 | 10000 | 0.2253 | 7.5624 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.1.dev0 - Tokenizers 0.13.2
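No inference snippet is given; a minimal hedged sketch with the ASR pipeline follows. The repo id is a placeholder, and `chunk_length_s` lets the pipeline handle audio longer than Whisper's 30-second window:

```python
from transformers import pipeline

# Placeholder repo id -- point at the published location of this fine-tune.
asr = pipeline("automatic-speech-recognition", model="<user>/whisper-medium-ru",
               chunk_length_s=30)

# Accepts a local audio file path or URL; returns the Russian transcription.
print(asr("sample_ru.wav")["text"])
```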
DingleyMaillotUrgell/homer-bot
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: taxiv3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.50 +/- 2.76
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebooks;
# it downloads the pickled checkpoint from the Hub and unpickles it.
model = load_from_hub(repo_id="Viswes/taxiv3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
DongHai/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Pixelcopter-PLE-v0
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 55.70 +/- 45.06
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**

This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Waynehillsdev/Waynehills_summary_tensorflow
[ "tf", "t5", "text2text-generation", "transformers", "generated_from_keras_callback", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: cartpole
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 453.90 +/- 138.30
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **CartPole-v1**

This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-12
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2023-04-07T16:58:23Z
--- language: - hi license: apache-2.0 tags: - automatic-speech-recognition - common_voice - generated_from_trainer datasets: - common_voice metrics: - wer model-index: - name: wav2vec2-common_voice-hi-demo results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: COMMON_VOICE - HI type: common_voice config: hi split: test args: 'Config: hi, Training split: train+validation, Eval split: test' metrics: - name: Wer type: wer value: 1.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-common_voice-hi-demo This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the COMMON_VOICE - HI dataset. It achieves the following results on the evaluation set: - Loss: 3.8769 - Wer: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 15.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:---:| | No log | 10.53 | 100 | 5.0318 | 1.0 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.3
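As a usage sketch only (note the reported WER of 1.0 means every test word was transcribed incorrectly, so this checkpoint is effectively a training-pipeline demo), explicit CTC decoding looks like:

```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Placeholder repo id -- substitute the published location of this checkpoint.
model_id = "<user>/wav2vec2-common_voice-hi-demo"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# Load a clip and resample it to the 16 kHz mono input the model expects.
speech, sr = torchaudio.load("sample_hi.wav")
speech = torchaudio.functional.resample(speech, sr, 16000).squeeze().numpy()

inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```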
albert-xlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
341
2023-04-07T17:41:44Z
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: pixelcopter
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 16.70 +/- 11.30
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**

This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
albert-xxlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,091
2023-04-07T17:46:03Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 402.00 +/- 184.81 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga PhuocBui -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga PhuocBui -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga PhuocBui ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 10000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 10000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
albert-xxlarge-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42,640
2023-04-07T17:47:42Z
---
library_name: ml-agents
tags:
- SnowballTarget
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SnowballTarget
---

# **ppo** Agent playing **SnowballTarget**
This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget
2. Find your model_id: Viswes/ppo-SnowballTargetTESTCOLAB
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2023-04-07T17:47:47Z
---
tags:
- autotrain
- text-classification
language:
- en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- billster45/autotrain-data-news_headlines
co2_eq_emissions:
  emissions: 0.47291454067852207
---

# Model Trained Using AutoTrain

- Problem type: Multi-class Classification
- Model ID: 47662116693
- CO2 Emissions (in grams): 0.4729

## Validation Metrics

- Loss: 0.474
- Accuracy: 0.866
- Macro F1: 0.836
- Micro F1: 0.866
- Weighted F1: 0.865
- Macro Precision: 0.844
- Micro Precision: 0.866
- Weighted Precision: 0.865
- Macro Recall: 0.830
- Micro Recall: 0.866
- Weighted Recall: 0.866

## Usage

You can use cURL to access this model:

```bash
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/billster45/autotrain-news_headlines-47662116693
```

Or the Python API:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("billster45/autotrain-news_headlines-47662116693", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("billster45/autotrain-news_headlines-47662116693", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
```
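To turn the raw logits into a class name, the model's `id2label` mapping can be used; a short sketch continuing the snippet above (the exact label names depend on the training data, which this card doesn't list):

```python
# Pick the highest-scoring class and map it to its label name
predicted_class_id = int(outputs.logits.argmax(dim=-1))
print(model.config.id2label[predicted_class_id])
```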
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2023-04-07T17:55:00Z
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-mseva
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-timit-mseva

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on an unspecified dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 300
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.24.0
- Pytorch 2.0.0+cu118
- Datasets 1.18.3
- Tokenizers 0.13.3
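Since this is a wav2vec2 checkpoint fine-tuned from an ASR base model, it would normally be loaded through the speech-recognition pipeline; a minimal sketch (the full repo id is a placeholder, and a bundled processor/tokenizer is assumed):

```python
from transformers import pipeline

# Load the fine-tuned checkpoint as an ASR pipeline (repo id is a placeholder)
asr = pipeline("automatic-speech-recognition", model="<user>/wav2vec2-base-timit-mseva")
print(asr("sample.wav")["text"])
```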
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2023-04-07T17:58:58Z
---
tags:
- FrozenLake-v1-4x4
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4
      type: FrozenLake-v1-4x4
    metrics:
    - type: mean_reward
      value: 0.12 +/- 0.32
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the course notebook
model = load_from_hub(repo_id="MohammedDhiyaEddine/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc.)
env = gym.make(model["env_id"])
```
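Once the checkpoint is loaded, acting greedily with respect to the learned Q-table looks roughly like this (the `"qtable"` key is an assumption based on the course's checkpoint format, and the classic 4-tuple `step` API assumes an older gym version):

```python
import numpy as np

state = env.reset()  # newer gym versions return (obs, info) instead
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, done, info = env.step(action)
```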
bert-large-cased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,214
2023-04-07T18:09:23Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 277.82 +/- 18.73
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

repo_id = "Flooow/ppo-LunarLander-v2"  # The repo_id
filename = "ppo-LunarLander-v2.zip"  # The model filename.zip

# If the model was trained with Python 3.8, the pickle protocol is 5,
# but Python 3.6 and 3.7 use protocol 4.
# In order to get compatibility we need to:
# 1. Install pickle5 (we did it at the beginning of the colab)
# 2. Create a custom empty object we pass as parameter to PPO.load()
custom_objects = {
    "learning_rate": 0.0,
    "lr_schedule": lambda _: 0.0,
    "clip_range": lambda _: 0.0,
}

checkpoint = load_from_hub(repo_id, filename)
model = PPO.load(checkpoint, custom_objects=custom_objects, print_system_info=True)
```
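A quick way to sanity-check the loaded model is SB3's built-in evaluation helper; a short follow-up sketch (assumes `gym` and its Box2D extra are installed so the LunarLander environment can be created):

```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

eval_env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```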
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
2023-04-07T19:34:59Z
--- license: cc-by-4.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: opus-mt-en-de-finetuned-en-to-de results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: wmt16 type: wmt16 config: de-en split: validation args: de-en metrics: - name: Bleu type: bleu value: 30.529 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-en-de-finetuned-en-to-de This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) on the wmt16 dataset. It achieves the following results on the evaluation set: - Loss: 1.2849 - Bleu: 30.529 - Rougelsum: 0.5587 - Gen Len: 27.0521 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:---------:|:-------:| | 1.5584 | 1.0 | 12500 | 1.2921 | 30.5519 | 0.5601 | 27.0549 | | 1.5649 | 2.0 | 25000 | 1.2877 | 30.578 | 0.5591 | 27.0415 | | 1.5686 | 3.0 | 37500 | 1.2859 | 30.5509 | 0.5591 | 27.0401 | | 1.5507 | 4.0 | 50000 | 1.2851 | 30.5396 | 0.5589 | 27.0526 | | 1.5532 | 5.0 | 62500 | 1.2849 | 30.529 | 0.5587 | 27.0521 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
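For inference, a fine-tuned Marian checkpoint like this one can be used through the translation pipeline; a minimal sketch (the full repo id is a placeholder, since the card doesn't state the owner):

```python
from transformers import pipeline

# Load the fine-tuned EN->DE model (repo id is a placeholder)
translator = pipeline("translation_en_to_de", model="<user>/opus-mt-en-de-finetuned-en-to-de")
print(translator("Machine learning is fun.", max_length=64)[0]["translation_text"])
```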
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
257,745
2023-04-07T18:23:12Z
--- license: mit tags: - generated_from_trainer datasets: - samsum metrics: - rouge model-index: - name: test-dialogue-summarization results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: samsum type: samsum config: samsum split: validation args: samsum metrics: - name: Rouge1 type: rouge value: 53.8085 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test-dialogue-summarization This model is a fine-tuned version of [facebook/bart-large-xsum](https://huggingface.co/facebook/bart-large-xsum) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.7499 - Rouge1: 53.8085 - Rouge2: 28.9627 - Rougel: 44.3913 - Rougelsum: 49.5056 - Gen Len: 30.2237 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.3909 | 1.0 | 1841 | 1.5591 | 52.8029 | 28.5133 | 43.7896 | 48.3986 | 25.8386 | | 1.0938 | 2.0 | 3683 | 1.4337 | 54.5987 | 29.195 | 44.2764 | 49.9201 | 31.9071 | | 0.856 | 3.0 | 5524 | 1.5402 | 54.2077 | 29.0463 | 44.3965 | 49.6877 | 29.4597 | | 0.6704 | 4.0 | 7366 | 1.6280 | 54.2616 | 29.5501 | 44.5626 | 49.7599 | 29.967 | | 0.548 | 5.0 | 9205 | 1.7499 | 53.8085 | 28.9627 | 44.3913 | 49.5056 | 30.2237 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
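As a BART checkpoint fine-tuned on SAMSum, the model is meant for dialogue summarization; a minimal inference sketch (the full repo id is a placeholder):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="<user>/test-dialogue-summarization")
dialogue = "Anna: Are we still on for lunch?\nTom: Yes, 12:30 at the usual place."
print(summarizer(dialogue, max_length=40, min_length=5)[0]["summary_text"])
```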
distilbert-base-uncased-finetuned-sst-2-english
[ "pytorch", "tf", "rust", "safetensors", "distilbert", "text-classification", "en", "dataset:sst2", "dataset:glue", "arxiv:1910.01108", "doi:10.57967/hf/0181", "transformers", "license:apache-2.0", "model-index", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,060,704
2023-04-07T18:30:45Z
---
license: other
tags:
- image-segmentation
- vision
- generated_from_trainer
model-index:
- name: segformer-finetuned-lane-10k-steps
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# segformer-finetuned-lane-10k-steps

This model is a fine-tuned version of [nvidia/segformer-b0-finetuned-cityscapes-512-1024](https://huggingface.co/nvidia/segformer-b0-finetuned-cityscapes-512-1024) on the Efferbach/lane_master dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0365
- Mean Iou: 0.4899
- Mean Accuracy: 0.7371
- Overall Accuracy: 0.7371
- Accuracy Background: nan
- Accuracy Left: 0.7394
- Accuracy Right: 0.7348
- Iou Background: 0.0
- Iou Left: 0.7371
- Iou Right: 0.7325

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 1337
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: polynomial
- training_steps: 10000

### Training results

| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Background | Accuracy Left | Accuracy Right | Iou Background | Iou Left | Iou Right |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:-------------:|:----------------:|:-------------------:|:-------------:|:--------------:|:--------------:|:--------:|:---------:|
| 0.0792 | 1.0 | 308 | 0.0714 | 0.0148 | 0.0229 | 0.0225 | nan | 0.0373 | 0.0085 | 0.0 | 0.0362 | 0.0083 |
| 0.0437 | 2.0 | 616 | 0.0502 | 0.1687 | 0.2775 | 0.2784 | nan | 0.2492 | 0.3058 | 0.0 | 0.2343 | 0.2718 |
| 0.0326 | 3.0 | 924 | 0.0445 | 0.2614 | 0.4441 | 0.4479 | nan | 0.3134 | 0.5748 | 0.0 | 0.3100 | 0.4742 |
| 0.0224 | 4.0 | 1232 | 0.0370 | 0.4048 | 0.6098 | 0.6100 | nan | 0.6043 | 0.6153 | 0.0 | 0.6031 | 0.6113 |
| 0.0184 | 5.0 | 1540 | 0.0346 | 0.3820 | 0.5858 | 0.5870 | nan | 0.5421 | 0.6295 | 0.0 | 0.5400 | 0.6060 |
| 0.0159 | 6.0 | 1848 | 0.0319 | 0.4367 | 0.6567 | 0.6573 | nan | 0.6343 | 0.6791 | 0.0 | 0.6341 | 0.6760 |
| 0.0139 | 7.0 | 2156 | 0.0317 | 0.4555 | 0.6855 | 0.6860 | nan | 0.6691 | 0.7019 | 0.0 | 0.6680 | 0.6986 |
| 0.0129 | 8.0 | 2464 | 0.0321 | 0.4348 | 0.6533 | 0.6535 | nan | 0.6479 | 0.6588 | 0.0 | 0.6474 | 0.6571 |
| 0.0122 | 9.0 | 2772 | 0.0275 | 0.4541 | 0.6827 | 0.6830 | nan | 0.6710 | 0.6943 | 0.0 | 0.6697 | 0.6927 |
| 0.0111 | 10.0 | 3080 | 0.0305 | 0.4609 | 0.6928 | 0.6927 | nan | 0.6969 | 0.6887 | 0.0 | 0.6963 | 0.6865 |
| 0.011 | 11.0 | 3388 | 0.0286 | 0.4646 | 0.6988 | 0.6991 | nan | 0.6890 | 0.7087 | 0.0 | 0.6883 | 0.7055 |
| 0.0103 | 12.0 | 3696 | 0.0298 | 0.4693 | 0.7058 | 0.7062 | nan | 0.6939 | 0.7177 | 0.0 | 0.6932 | 0.7148 |
| 0.0097 | 13.0 | 4004 | 0.0293 | 0.4717 | 0.7090 | 0.7087 | nan | 0.7184 | 0.6996 | 0.0 | 0.7176 | 0.6975 |
| 0.0093 | 14.0 | 4312 | 0.0330 | 0.4537 | 0.6835 | 0.6836 | nan | 0.6775 | 0.6894 | 0.0 | 0.6768 | 0.6843 |
| 0.009 | 15.0 | 4620 | 0.0331 | 0.4804 | 0.7226 | 0.7226 | nan | 0.7194 | 0.7257 | 0.0 | 0.7178 | 0.7234 |
| 0.0088 | 16.0 | 4928 | 0.0315 | 0.4890 | 0.7355 | 0.7357 | nan | 0.7275 | 0.7435 | 0.0 | 0.7259 | 0.7411 |
| 0.0086 | 17.0 | 5236 | 0.0338 | 0.4813 | 0.7234 | 0.7234 | nan | 0.7224 | 0.7243 | 0.0 | 0.7216 | 0.7223 |
| 0.0085 | 18.0 | 5544 | 0.0348 | 0.4743 | 0.7129 | 0.7126 | nan | 0.7225 | 0.7033 | 0.0 | 0.7217 | 0.7012 |
| 0.0083 | 19.0 | 5852 | 0.0357 | 0.4812 | 0.7245 | 0.7244 | nan | 0.7281 | 0.7210 | 0.0 | 0.7254 | 0.7183 |
| 0.0081 | 20.0 | 6160 | 0.0334 | 0.4829 | 0.7271 | 0.7269 | nan | 0.7337 | 0.7205 | 0.0 | 0.7305 | 0.7182 |
| 0.0079 | 21.0 | 6468 | 0.0359 | 0.4773 | 0.7177 | 0.7177 | nan | 0.7184 | 0.7170 | 0.0 | 0.7174 | 0.7146 |
| 0.0077 | 22.0 | 6776 | 0.0351 | 0.4874 | 0.7332 | 0.7329 | nan | 0.7440 | 0.7223 | 0.0 | 0.7432 | 0.7190 |
| 0.0075 | 23.0 | 7084 | 0.0344 | 0.4855 | 0.7296 | 0.7292 | nan | 0.7437 | 0.7156 | 0.0 | 0.7425 | 0.7141 |
| 0.0077 | 24.0 | 7392 | 0.0362 | 0.4799 | 0.7216 | 0.7216 | nan | 0.7236 | 0.7196 | 0.0 | 0.7223 | 0.7174 |
| 0.0071 | 25.0 | 7700 | 0.0391 | 0.4775 | 0.7179 | 0.7180 | nan | 0.7173 | 0.7186 | 0.0 | 0.7161 | 0.7163 |
| 0.0077 | 26.0 | 8008 | 0.0339 | 0.4895 | 0.7367 | 0.7366 | nan | 0.7405 | 0.7329 | 0.0 | 0.7388 | 0.7297 |
| 0.0069 | 27.0 | 8316 | 0.0344 | 0.4858 | 0.7305 | 0.7305 | nan | 0.7291 | 0.7318 | 0.0 | 0.7278 | 0.7297 |
| 0.0069 | 28.0 | 8624 | 0.0361 | 0.4844 | 0.7283 | 0.7282 | nan | 0.7324 | 0.7243 | 0.0 | 0.7309 | 0.7221 |
| 0.007 | 29.0 | 8932 | 0.0371 | 0.4837 | 0.7273 | 0.7270 | nan | 0.7360 | 0.7186 | 0.0 | 0.7345 | 0.7166 |
| 0.007 | 30.0 | 9240 | 0.0366 | 0.4854 | 0.7305 | 0.7303 | nan | 0.7379 | 0.7231 | 0.0 | 0.7353 | 0.7208 |
| 0.0067 | 31.0 | 9548 | 0.0367 | 0.4866 | 0.7322 | 0.7321 | nan | 0.7357 | 0.7286 | 0.0 | 0.7335 | 0.7263 |
| 0.0068 | 32.0 | 9856 | 0.0364 | 0.4883 | 0.7348 | 0.7347 | nan | 0.7377 | 0.7318 | 0.0 | 0.7355 | 0.7295 |
| 0.0067 | 32.47 | 10000 | 0.0365 | 0.4899 | 0.7371 | 0.7371 | nan | 0.7394 | 0.7348 | 0.0 | 0.7371 | 0.7325 |

### Framework versions

- Transformers 4.28.0.dev0
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
AdapterHub/bert-base-uncased-pf-squad_v2
[ "bert", "en", "dataset:squad_v2", "arxiv:2104.08247", "adapter-transformers", "question-answering", "adapterhub:qa/squad2" ]
question-answering
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
# vicuna-13b

This README provides a step-by-step guide to set up and run the FastChat application with the required dependencies and model.

## Prerequisites

Before you proceed, ensure that you have `git` installed on your system.

## Installation

Follow the steps below to install the required packages and set up the environment.

1. Upgrade `pip`:

```bash
python3 -m pip install --upgrade pip
```

2. Install `accelerate`:

```bash
python3 -m pip install accelerate
```

3. Install `bitsandbytes`:

3.1 Install via pip:

```bash
python3 -m pip install bitsandbytes
```

3.2 Alternatively, clone the `bitsandbytes` repository and install it from source:

```bash
git clone https://github.com/TimDettmers/bitsandbytes.git
cd bitsandbytes
CUDA_VERSION=118 make cuda11x
python3 -m pip install .
cd ..
```

Use the following command to find your `CUDA_VERSION`:

```bash
nvcc --version
```

4. Clone the `FastChat` repository and install it:

```bash
git clone https://github.com/lm-sys/FastChat.git
cd FastChat
python3 -m pip install -e .
cd ..
```

5. Install `git-lfs`:

```bash
curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
sudo apt-get install git-lfs
git lfs install
```

6. Clone the `vicuna-13b` model:

```bash
git clone https://huggingface.co/helloollel/vicuna-13b
```

## Running FastChat

After completing the installation, you can run FastChat with the following command:

```bash
python3 -m fastchat.serve.cli --model-path ./vicuna-13b
```

This will start FastChat using the `vicuna-13b` model.

## Running in Notebook

```python
import argparse
import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer

from fastchat.conversation import conv_templates, SeparatorStyle
from fastchat.serve.monkey_patch_non_inplace import replace_llama_attn_with_non_inplace_operations


def load_model(model_name, device, num_gpus, load_8bit=False):
    if device == "cpu":
        kwargs = {}
    elif device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if load_8bit:
            if num_gpus != "auto" and int(num_gpus) != 1:
                print("8-bit weights are not supported on multiple GPUs. Revert to use one GPU.")
            kwargs.update({"load_in_8bit": True, "device_map": "auto"})
        else:
            if num_gpus == "auto":
                kwargs["device_map"] = "auto"
            else:
                num_gpus = int(num_gpus)
                if num_gpus != 1:
                    kwargs.update({
                        "device_map": "auto",
                        "max_memory": {i: "13GiB" for i in range(num_gpus)},
                    })
    elif device == "mps":
        # Avoid bugs in mps backend by not using in-place operations.
kwargs = {"torch_dtype": torch.float16} replace_llama_attn_with_non_inplace_operations() else: raise ValueError(f"Invalid device: {device}") tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, **kwargs) # calling model.cuda() mess up weights if loading 8-bit weights if device == "cuda" and num_gpus == 1 and not load_8bit: model.to("cuda") elif device == "mps": model.to("mps") return model, tokenizer @torch.inference_mode() def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2): """Adapted from fastchat/serve/model_worker.py::generate_stream""" prompt = params["prompt"] l_prompt = len(prompt) temperature = float(params.get("temperature", 1.0)) max_new_tokens = int(params.get("max_new_tokens", 256)) stop_str = params.get("stop", None) input_ids = tokenizer(prompt).input_ids output_ids = list(input_ids) max_src_len = context_len - max_new_tokens - 8 input_ids = input_ids[-max_src_len:] for i in range(max_new_tokens): if i == 0: out = model( torch.as_tensor([input_ids], device=device), use_cache=True) logits = out.logits past_key_values = out.past_key_values else: attention_mask = torch.ones( 1, past_key_values[0][0].shape[-2] + 1, device=device) out = model(input_ids=torch.as_tensor([[token]], device=device), use_cache=True, attention_mask=attention_mask, past_key_values=past_key_values) logits = out.logits past_key_values = out.past_key_values last_token_logits = logits[0][-1] if device == "mps": # Switch to CPU by avoiding some bugs in mps backend. last_token_logits = last_token_logits.float().to("cpu") if temperature < 1e-4: token = int(torch.argmax(last_token_logits)) else: probs = torch.softmax(last_token_logits / temperature, dim=-1) token = int(torch.multinomial(probs, num_samples=1)) output_ids.append(token) if token == tokenizer.eos_token_id: stopped = True else: stopped = False if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped: output = tokenizer.decode(output_ids, skip_special_tokens=True) pos = output.rfind(stop_str, l_prompt) if pos != -1: output = output[:pos] stopped = True yield output if stopped: break del past_key_values args = dict( model_name='./vicuna-13b', device='cuda', num_gpus='1', load_8bit=True, conv_template='vicuna_v1.1', temperature=0.7, max_new_tokens=512, debug=False ) args = argparse.Namespace(**args) model_name = args.model_name # Model model, tokenizer = load_model(args.model_name, args.device, args.num_gpus, args.load_8bit) # Chat conv = conv_templates[args.conv_template].copy() def chat(inp): conv.append_message(conv.roles[0], inp) conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() params = { "model": model_name, "prompt": prompt, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2, } print(f"{conv.roles[1]}: ", end="", flush=True) pre = 0 for outputs in generate_stream(tokenizer, model, params, args.device): outputs = outputs[len(prompt) + 1:].strip() outputs = outputs.split(" ") now = len(outputs) if now - 1 > pre: print(" ".join(outputs[pre:now-1]), end=" ", flush=True) pre = now - 1 print(" ".join(outputs[pre:]), flush=True) conv.messages[-1][-1] = " ".join(outputs) ``` ```python chat("what's the meaning of life?") ```
AdapterHub/roberta-base-pf-cola
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:lingaccept/cola" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - aeslc metrics: - rouge model-index: - name: pegasus-large-finetuned-aeslc results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: aeslc type: aeslc config: default split: validation args: default metrics: - name: Rouge1 type: rouge value: 32.9341 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-large-finetuned-aeslc This model is a fine-tuned version of [google/pegasus-large](https://huggingface.co/google/pegasus-large) on the aeslc dataset. It achieves the following results on the evaluation set: - Loss: 3.0274 - Rouge1: 32.9341 - Rouge2: 17.7876 - Rougel: 32.0858 - Rougelsum: 32.0886 - Gen Len: 11.6577 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 3.3247 | 1.0 | 7218 | 3.0274 | 32.9341 | 17.7876 | 32.0858 | 32.0886 | 11.6577 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Aleksandar/distilbert-srb-ner-setimes
[ "pytorch", "distilbert", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 233.56 +/- 30.24 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
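The card leaves the usage section as a TODO; a minimal loading sketch under common huggingface_sb3 conventions (the repo id and the `<algo>-<env>.zip` filename are assumptions, not stated by the card):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Filename follows the usual "<algo>-<env>.zip" convention (an assumption)
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```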
Aleksandar/electra-srb-ner
[ "pytorch", "safetensors", "electra", "token-classification", "dataset:wikiann", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "ElectraForTokenClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### RFIA2 Dreambooth model trained by HuggM3 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
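A Dreambooth model exported by that notebook can usually be loaded with 🧨 diffusers; a rough sketch (the repo id and the instance prompt token are assumptions based on the model and trainer names above):

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id and prompt token are assumptions, not confirmed by this card
pipe = StableDiffusionPipeline.from_pretrained("HuggM3/rfia2", torch_dtype=torch.float16).to("cuda")
image = pipe("a photo of rfia2").images[0]
image.save("sample.png")
```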
Alexander-Learn/bert-finetuned-squad-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: ml-agents
tags:
- Pyramids
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
---

# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: Agneev/ppo-Pyramids
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
Andrija/M-bert-NER
[ "pytorch", "bert", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
library_name: stable-baselines3
tags:
- PandaReachDense-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaReachDense-v2
      type: PandaReachDense-v2
    metrics:
    - type: mean_reward
      value: -2.97 +/- 0.56
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **PandaReachDense-v2**
This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub

...
```
AnonymousSub/SR_EManuals-BERT
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - billsum metrics: - rouge model-index: - name: t5-small_finetuned_billsum_model_bs8_lr5e-05 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: billsum type: billsum config: default split: ca_test args: default metrics: - name: Rouge1 type: rouge value: 0.1967 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small_finetuned_billsum_model_bs8_lr5e-05 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the billsum dataset. It achieves the following results on the evaluation set: - Loss: 2.1736 - Rouge1: 0.1967 - Rouge2: 0.0984 - Rougel: 0.1675 - Rougelsum: 0.1677 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 124 | 2.2775 | 0.1968 | 0.0943 | 0.1674 | 0.1674 | 19.0 | | No log | 2.0 | 248 | 2.2187 | 0.2001 | 0.0981 | 0.1705 | 0.1707 | 19.0 | | No log | 3.0 | 372 | 2.1969 | 0.1965 | 0.0987 | 0.1686 | 0.1687 | 19.0 | | No log | 4.0 | 496 | 2.1736 | 0.1967 | 0.0984 | 0.1675 | 0.1677 | 19.0 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
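T5-style models expect a task prefix at inference time; a minimal sketch for this billsum fine-tune (the full repo id is a placeholder, and `summarize:` is the standard T5 prefix assumed here):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("<user>/t5-small_finetuned_billsum_model_bs8_lr5e-05")
model = AutoModelForSeq2SeqLM.from_pretrained("<user>/t5-small_finetuned_billsum_model_bs8_lr5e-05")

bill_text = "The bill amends the existing statute to extend funding for rural broadband programs..."
inputs = tokenizer("summarize: " + bill_text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=20)  # Gen Len above suggests short summaries
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```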
AnonymousSub/SR_EManuals-RoBERTa
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 233.59 +/- 36.15 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/cline-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforece-cartpole_policyV1
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 500.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AnonymousSub/rule_based_bert_hier_diff_equal_wts_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2021-11-04T21:11:11Z
---
language: en
tags:
- azbert
- pretraining
- fill-mask
widget:
- text: "$f$ $($ $x$ [MASK] $y$ $)$"
  example_title: "mathy"
- text: "$x$ [MASK] $x$ $equal$ $2$ $x$"
  example_title: "mathy"
- text: "Proof by [MASK] that $n$ $fact$ $gt$ $3$ $n$ for $n$ $gt$ $6$"
  example_title: "mathy"
- text: "Proof by induction that $n$ [MASK] $gt$ $3$ $n$ for $n$ $gt$ $6$"
  example_title: "mathy"
- text: "The goal of life is [MASK]."
  example_title: "philosophical"
license: mit
---

## About
This [repository](https://github.com/approach0/azbert) is a boilerplate to push a mask-filling model to the HuggingFace Model Hub.

### Upload to huggingface
Download your tokenizer, model checkpoints, and optionally the training logs (`events.out.*`) to the `./ckpt` directory (do not include any large files except `pytorch_model.bin` and the log files `events.out.*`).

Optionally, test the model using the MLM task:
```sh
pip install pya0 # for math token preprocessing
# testing local checkpoints:
python test.py ./ckpt/math-tokenizer ./ckpt/2-2-0/encoder.ckpt
# testing Model Hub checkpoints:
python test.py approach0/coco-mae-220 approach0/coco-mae-220
```

> **Note**
> Modify the test examples in `test.txt` to play with it.
> The test file is tab-separated; the first column is additional positions you want to mask for the right-side sentence (useful for masking tokens in math markups).
> A zero means no additional mask positions.

To upload to huggingface, use the `upload2hgf.sh` script. Before running this script, be sure to check that:
* `git-lfs` is installed
* you have a git remote named `hgf` referencing `https://huggingface.co/your/repo`
* the model directory contains all the files needed: `config.json` and `pytorch_model.bin`
* the tokenizer directory contains all the files needed: `added_tokens.json`, `special_tokens_map.json`, `tokenizer_config.json`, `vocab.txt` and `tokenizer.json`
* there is no `tokenizer_file` field in `tokenizer_config.json` (sometimes it is located locally at `~/.cache`)
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - esnli metrics: - accuracy - f1 - rouge - bleu model-index: - name: google-flan-t5-small-e-snli-generation-label_and_explanation-selected-b48 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: esnli type: esnli config: plain_text split: validation args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8622231253810201 - name: F1 type: f1 value: 0.8623314280769628 - name: Rouge1 type: rouge value: 0.605873896307076 - name: Bleu type: bleu value: 0.40472213589689604 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # google-flan-t5-small-e-snli-generation-label_and_explanation-selected-b48 This model is a fine-tuned version of [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) on the esnli dataset. It achieves the following results on the evaluation set: - Loss: 1.8720 - Accuracy: 0.8622 - F1: 0.8623 - Bertscore F1: 0.9329 - Rouge1: 0.6059 - Rouge2: 0.3988 - Rougel: 0.5475 - Rougelsum: 0.5496 - Bleu: 0.4047 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.05 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Bertscore F1 | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:------------:|:------:|:------:|:------:|:---------:|:------:| | 1.5084 | 0.17 | 2000 | 1.7484 | 0.8001 | 0.7997 | 0.9271 | 0.5768 | 0.3695 | 0.5209 | 0.5229 | 0.3703 | | 1.2745 | 0.35 | 4000 | 1.8137 | 0.8113 | 0.8110 | 0.9304 | 0.5881 | 0.3804 | 0.5305 | 0.5325 | 0.3853 | | 1.2287 | 0.52 | 6000 | 1.8358 | 0.8392 | 0.8403 | 0.9298 | 0.5828 | 0.3747 | 0.5282 | 0.5301 | 0.3778 | | 1.1964 | 0.7 | 8000 | 1.8432 | 0.8430 | 0.8437 | 0.9326 | 0.5974 | 0.3905 | 0.5447 | 0.5462 | 0.3998 | | 1.1674 | 0.87 | 10000 | 1.8567 | 0.8507 | 0.8485 | 0.9310 | 0.5947 | 0.3888 | 0.5383 | 0.5402 | 0.3892 | | 1.1371 | 1.05 | 12000 | 1.8720 | 0.8622 | 0.8623 | 0.9329 | 0.6059 | 0.3988 | 0.5475 | 0.5496 | 0.4047 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.2
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: polev1-basic
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 500.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 258.36 +/- 13.96 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
tags:
- stable diffusion
---

Just a place where I upload the models I mainly use for LoRA/LyCORIS training!! I only upload models that aren't already on Hugging Face, btw. All of the models here are available on civitai, minus one of them :3

Porn Merge
https://civitai.com/models/2661/uber-realistic-porn-merge-urpm
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
library_name: ml-agents
tags:
- SnowballTarget
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SnowballTarget
---

# **ppo** Agent playing **SnowballTarget**
This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget
2. Find your model_id: globophobe/ppo-SnowballTargetTESTCOLAB
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/rule_based_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # rithwik-db/triplets-bert-base-cased-500 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('rithwik-db/triplets-bert-base-cased-500') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('rithwik-db/triplets-bert-base-cased-500') model = AutoModel.from_pretrained('rithwik-db/triplets-bert-base-cased-500') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=rithwik-db/triplets-bert-base-cased-500) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 8245 with parameters: ``` {'batch_size': 4, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.TripletLoss.TripletLoss` with parameters: ``` {'distance_metric': 'TripletDistanceMetric.EUCLIDEAN', 'triplet_margin': 5} ``` Parameters of the fit()-Method: ``` { "epochs": 3, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
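Returning to the training parameters listed above: they translate into roughly the following script. This is a minimal sketch, not the author's original code; the triplet examples are hypothetical placeholders, since the actual training data is not documented in this card.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Hypothetical triplets; the real training set (8245 batches of 4) is not documented here.
train_examples = [
    InputExample(texts=["anchor sentence", "positive sentence", "negative sentence"]),
]

model = SentenceTransformer("bert-base-cased")
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=4)
train_loss = losses.TripletLoss(
    model=model,
    distance_metric=losses.TripletDistanceMetric.EUCLIDEAN,
    triplet_margin=5,
)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=3,
    warmup_steps=10000,              # scheduler: WarmupLinear (the default)
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
)
```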
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Find your model_id: globophobe/ppo-PyramidsRND 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/specter-bert-model_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
Access to model Ekittl01/Illuminate is restricted and you are not in the authorized list. Visit https://huggingface.co/Ekittl01/Illuminate to ask for access.
AnonymousSub/specter-bert-model_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: donut-sroie results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-sroie This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
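For reference, inference with a Donut checkpoint generally follows the pattern below. This is a hedged sketch: `"<repo-id>"` is a placeholder for this model's Hub id, and the task-start prompt is checkpoint-specific, so `"<s>"` here is only an assumption, since the card does not document it.

```python
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# "<repo-id>" is a placeholder for this model's Hub id.
processor = DonutProcessor.from_pretrained("<repo-id>")
model = VisionEncoderDecoderModel.from_pretrained("<repo-id>")

image = Image.open("receipt.png").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values

# The task-start prompt is an assumption; fine-tuned Donut models define their own.
task_prompt = "<s>"
decoder_input_ids = processor.tokenizer(
    task_prompt, add_special_tokens=False, return_tensors="pt"
).input_ids

with torch.no_grad():
    outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)

# Decode the generated token ids and convert Donut's tag format to JSON.
sequence = processor.batch_decode(outputs)[0]
print(processor.token2json(sequence))
```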
AnonymousSub/unsup-consert-base_copy_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpolev1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AnonymousSub/unsup-consert-base_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
Access to model pathanziyankhan989bba/Hackerdit is restricted and you are not in the authorized list. Visit https://huggingface.co/pathanziyankhan989bba/Hackerdit to ask for access.
AnonymousSub/unsup-consert-papers
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: creativeml-openrail-m --- https://civitai.com/models/32537/caulkinumv2ar-for-anime-style
Anthos23/distilbert-base-uncased-finetuned-sst2
[ "tf", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_keras_callback", "license:apache-2.0" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: disfluency-large results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # disfluency-large This model is a fine-tuned version of [vinai/phobert-large](https://huggingface.co/vinai/phobert-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0438 - Precision: 0.9698 - Recall: 0.9663 - F1: 0.9681 - Accuracy: 0.9921 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 140 | 0.0422 | 0.9651 | 0.9627 | 0.9639 | 0.9902 | | No log | 2.0 | 280 | 0.0315 | 0.9718 | 0.9730 | 0.9724 | 0.9923 | | No log | 3.0 | 420 | 0.2221 | 0.8079 | 0.7530 | 0.7795 | 0.9355 | | 0.024 | 4.0 | 560 | 0.0379 | 0.9693 | 0.9675 | 0.9684 | 0.9926 | | 0.024 | 5.0 | 700 | 0.0499 | 0.9657 | 0.9639 | 0.9648 | 0.9905 | | 0.024 | 6.0 | 840 | 0.0388 | 0.9688 | 0.9688 | 0.9688 | 0.9925 | | 0.024 | 7.0 | 980 | 0.0438 | 0.9698 | 0.9663 | 0.9681 | 0.9921 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
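Since the card does not yet show usage, here is a minimal, hedged inference sketch. `"<repo-id>"` is a placeholder, the Vietnamese sample is invented, and note that PhoBERT checkpoints normally expect word-segmented input, which this sketch skips.

```python
from transformers import pipeline

# "<repo-id>" stands in for this checkpoint's Hub id.
tagger = pipeline("token-classification", model="<repo-id>", aggregation_strategy="simple")

# Invented example with a repair disfluency; PhoBERT usually expects
# word-segmented Vietnamese text (e.g. via VnCoreNLP), omitted here.
print(tagger("tôi muốn đặt à không đặt một chuyến bay"))
```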
Anthos23/my-awesome-model
[ "pytorch", "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1087.72 +/- 88.11 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
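Until the TODO above is filled in, loading the checkpoint generally looks like the hedged sketch below; the repo id and zip filename are placeholders, not values confirmed by this card.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholders: substitute the actual Hub repo id and checkpoint filename.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```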
ArBert/albert-base-v2-finetuned-ner-agglo-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: mit --- ### ahx-beta-4322d99 on Stable Diffusion This is the `<ahx-beta-4322d99>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<ahx-beta-4322d99> 0](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/3.jpeg) ![<ahx-beta-4322d99> 1](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/5.jpeg) ![<ahx-beta-4322d99> 2](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/1.jpeg) ![<ahx-beta-4322d99> 3](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/4.jpeg) ![<ahx-beta-4322d99> 4](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/0.jpeg) ![<ahx-beta-4322d99> 5](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/2.jpeg) ![<ahx-beta-4322d99> 6](https://huggingface.co/sd-concepts-library/ahx-beta-4322d99/resolve/main/concept_images/6.jpeg)
ArBert/albert-base-v2-finetuned-ner
[ "pytorch", "tensorboard", "albert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- tags: - autotrain - vision - image-classification datasets: - cledoux42/autotrain-data-ethnicity-test_v003 widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 6.022813032092885 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 47959117029 - CO2 Emissions (in grams): 6.0228 ## Validation Metrics - Loss: 0.530 - Accuracy: 0.796 - Macro F1: 0.797 - Micro F1: 0.796 - Weighted F1: 0.796 - Macro Precision: 0.797 - Micro Precision: 0.796 - Weighted Precision: 0.796 - Macro Recall: 0.798 - Micro Recall: 0.796 - Weighted Recall: 0.796
ArBert/bert-base-uncased-finetuned-ner-gmm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - odinsynth_sequence_dataset model-index: - name: odinsynth_encoder_decoder_test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # odinsynth_encoder_decoder_test This model is a fine-tuned version of [](https://huggingface.co/) on the odinsynth_sequence_dataset dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 3 - eval_batch_size: 3 - seed: 42 - gradient_accumulation_steps: 200 - total_train_batch_size: 600 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20.0 ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0 - Datasets 2.11.0 - Tokenizers 0.11.0
ArBert/bert-base-uncased-finetuned-ner-kmeans
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-04-09T04:42:33Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 280.03 +/- 20.25 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
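As with the other Stable-Baselines3 cards, the TODO can be completed along the lines below; this sketch additionally re-evaluates the loaded policy. The repo id and filename are placeholders, and the gymnasium import assumes a recent SB3.

```python
import gymnasium as gym  # assumes SB3 >= 2.0, which targets gymnasium

from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholders: substitute the actual Hub repo id and checkpoint filename.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Quick sanity check against the reported mean reward.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```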
ArthurBaia/bert-base-portuguese-cased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep5_lr4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep5_lr4 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2107 - Precision: 0.6683 - Recall: 0.7027 - F1: 0.6851 - Accuracy: 0.9463 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-08 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.2604 | 0.6839 | 0.6767 | 0.6803 | 0.9435 | | 0.2766 | 2.0 | 934 | 0.2361 | 0.6769 | 0.6897 | 0.6832 | 0.9448 | | 0.2401 | 3.0 | 1401 | 0.2212 | 0.6732 | 0.6994 | 0.6861 | 0.9456 | | 0.2253 | 4.0 | 1868 | 0.2131 | 0.6695 | 0.7016 | 0.6852 | 0.9462 | | 0.2103 | 5.0 | 2335 | 0.2107 | 0.6683 | 0.7027 | 0.6851 | 0.9463 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Atampy26/GPT-Glacier
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Find your model_id: ManishW/ppo-Pyramids-v1 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Atlasky/turkish-negator-nn
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: roberta-base-value-determinator results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-value-determinator This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the 0x7194633/value_determinant dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 - Accuracy: 1.0 - F1: 1.0 - Precision: 1.0 - Recall: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Augustvember/WokkaBot7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-09T07:31:43Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-medium-hi results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-medium-hi This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3957 - Wer: 0.2425 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 32 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0234 | 4.88 | 1000 | 0.2762 | 0.2750 | | 0.003 | 9.76 | 2000 | 0.3342 | 0.2518 | | 0.0001 | 14.63 | 3000 | 0.3636 | 0.2424 | | 0.0 | 19.51 | 4000 | 0.3877 | 0.2426 | | 0.0 | 24.39 | 5000 | 0.3957 | 0.2425 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.3
AvatarXD/DialoGPT-medium-Blitzo
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- language: - ru license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Russian results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 ru type: mozilla-foundation/common_voice_11_0 config: ru split: test args: ru metrics: - name: Wer type: wer value: 12.237466436164343 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Russian This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 ru dataset. It achieves the following results on the evaluation set: - Loss: 0.3060 - Wer: 12.2375 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.0731 | 1.04 | 1000 | 0.2183 | 13.0589 | | 0.0194 | 3.02 | 2000 | 0.2390 | 12.8027 | | 0.0067 | 4.06 | 3000 | 0.2524 | 12.5832 | | 0.0025 | 6.04 | 4000 | 0.2725 | 12.3245 | | 0.0017 | 8.02 | 5000 | 0.2854 | 12.7046 | | 0.0009 | 9.06 | 6000 | 0.2915 | 12.5072 | | 0.0005 | 11.04 | 7000 | 0.3006 | 12.2473 | | 0.0004 | 13.02 | 8000 | 0.3060 | 12.2375 | | 0.0003 | 14.06 | 9000 | 0.3129 | 12.2963 | | 0.0003 | 16.04 | 10000 | 0.3157 | 12.2988 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.1.dev0 - Tokenizers 0.13.2
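The card omits usage, so here is a minimal, hedged transcription sketch; `"<repo-id>"` is a placeholder for this checkpoint's Hub id and the audio filename is illustrative.

```python
from transformers import pipeline

# "<repo-id>" stands in for this Whisper checkpoint's Hub id.
asr = pipeline("automatic-speech-recognition", model="<repo-id>")
print(asr("sample_ru.wav")["text"])
```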
Ayham/bert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 254.38 +/- 18.03 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Ayham/bert_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- datasets: - hackathon-somos-nlp-2023/podcasts-ner-es license: mit language: - es pipeline_tag: text-generation --- # Named-entity recognition for Spanish Podcasts This model is a version of the Spanish [bertin-project/bertin-gpt-j-6B](https://huggingface.co/bertin-project/bertin-gpt-j-6B) checkpoint fine-tuned for named-entity recognition. This model has been developed during the 2023 Hackathon organized by SomosNLP, with RTX 3090 GPUs provided by Q Blocks. ## Motivation of the project Podcasts are an incredible source of information and inspiration. We can listen to them while commuting, practising sport or cooking our favourite recipe. However, it can be difficult to retain specific facts, dates or people mentioned in them. The aim of this project has been to explore how to capture all those facts using named-entity recognition. Instead of using a language model fine-tuned with a specific NER head, we have reframed the problem as text generation from a prompt of the kind: ``` text: Yo hoy voy a hablar de mujeres en el mundo del arte, porque me ha leído un libro fantástico que se llama Historia del arte sin hombres, de Katie Hesel. entities: (people, Katie Hesel), (books, Historia del arte sin hombres) ``` By fine-tuning a large generative model with this prompt, we are able to capture the entities mentioned in the podcast. We fine-tuned [bertin-gpt-j-6B](https://huggingface.co/bertin-project/bertin-gpt-j-6B) following this strategy. Similar projects with podcasts have been conducted by Andrej Karpathy (https://karpathy.ai/lexicap/) and Aleksa Gordic (https://www.hubermantranscripts.com/). ## Dataset creation For full details of the dataset, check [this page](https://huggingface.co/datasets/hackathon-somos-nlp-2023/podcasts-ner-es). A brief summary is: 1) Transcribe the audio from a YouTube playlist by employing Whisper ([check this notebook to understand how we did it](https://github.com/sergiopperez/hackathon_podcast/blob/main/src/NER/get_transcriptions.ipynb)). For the podcast we chose "Deforme Semanal" and the audio from this [playlist](https://www.youtube.com/playlist?list=PLLbN7SMQhMVZoXhtQ00AyebQE_-ttDrs9). 2) Gather all the transcriptions, unify them into a dataset, and divide them into sentences of 512 characters. 3) For each sentence, label the entities in it by using the `text-davinci-003` API from OpenAI ([check this notebook to understand how we did it](https://github.com/sergiopperez/hackathon_podcast/blob/main/src/NER/create_entities_json.ipynb)). ## Fine-tuning Training was performed on an RTX 3090 kindly provided by Q Blocks. It took 2h20m. We employed the Low-Rank Adaptation (LoRA) strategy to substantially reduce the number of trainable parameters for downstream tasks while maintaining model quality. The pre-trained checkpoint employed was [bertin-project/bertin-gpt-j-6B](https://huggingface.co/bertin-project/bertin-gpt-j-6B). We didn't perform an extensive hyperparameter sweep, so there's room to improve the model. Check [this script](https://github.com/sergiopperez/hackathon_podcast/blob/main/src/NER/peft-gpt-j.ipynb) to understand how we did it. ## Evaluation Disclaimer: there was no formal evaluation of the training. ## Team members [David Mora](https://huggingface.co/DavidFM43) [Sergio Perez](https://huggingface.co/sergiopperez) [Alberto Fernandez](https://huggingface.co/AlbertoFH98)
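A minimal generation sketch in the spirit of the prompt format above. Loading via `peft` is an assumption based on the LoRA setup described (see the `peft-gpt-j.ipynb` script), and `"<adapter-repo>"` is a placeholder, not a repo id confirmed by this card.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base_id = "bertin-project/bertin-gpt-j-6B"
tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# "<adapter-repo>" is a placeholder for the published LoRA adapter.
model = PeftModel.from_pretrained(base_model, "<adapter-repo>")

# Build a prompt in the documented "text: ... entities:" format.
prompt = "text: Hoy voy a hablar del libro Historia del arte sin hombres.\nentities:"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```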
Ayham/bertgpt2_cnn
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: bigscience-bloom-rail-1.0 datasets: - JosephusCheung/GuanacoDataset language: - am metrics: - character library_name: flair ---
Ayham/roberta_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-09T09:02:21Z
--- license: mit datasets: - vicgalle/alpaca-gpt4 language: - en tags: - gpt-j - instruction-tuning - alpaca - gpt4 --- # GPT-J-6B instruction-tuned on Alpaca-GPT4 This model was finetuned on GPT-4 generations of the Alpaca prompts, using LoRA for 30,000 steps (batch size of 128), taking over 7 hours on four V100S GPUs. ![loss](loss.png) ### Examples The following samples were generated with `{"temperature" : 0.7, "top_p" : 0.92, "top_k" : 0}`: ##### **Give three tips for a good meal** ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Give three tips for a good meal. ### Response: Tip 1: Make sure to enjoy your meal. It's easy to forget about enjoying yourself when you're hungry. Tip 2: Try to eat a healthy meal that contains lots of vegetables and fruits. Tip 3: Eat slowly and chew your food thoroughly. This will help you digest your meal better. ``` ##### **Write a poem about a delicious night** ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Write a poem about a delicious night. ### Response: > "The moon shone brightly on the night sky, > > Stars twinkled brightly from afar. > > Suddenly, my eyes saw the silhouette > > Of a handsome stranger in the distance. > > He walked slowly towards me with a smile. > > He said, 'I've come to eat you up tonight.' > > Suddenly, I knew that he was a cannibal! > > 'Wait!' I cried as he approached. > > I yelled, 'I'm vegetarian!' > > Suddenly, he laughed and replied, > > 'Oh, you silly girl! That's okay.' > > Suddenly, he grabbed my hand and kissed it. > > He said, 'You're delicious!' > > Suddenly, he dragged me to his lair. > > There, he ripped open my belly button > > And gobbled me up like a juicy steak. > > Suddenly, I woke up screaming from a nightmare. > > 'Damn! Damn! Damn!' I cried. > ``` ##### **Write a tweet describing your capabilities** ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Write a tweet describing your capabilities. ### Response: Sample tweet: "@geeksforgeeks I'm a mathematician and I enjoy programming. Feel free to reach out to me if you want to chat!" ```
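A hedged sketch of reproducing the samples above with the quoted sampling parameters; `"<repo-id>"` is a placeholder, since this card does not state the checkpoint's Hub id.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# "<repo-id>" is a placeholder for this model's Hub id.
tokenizer = AutoTokenizer.from_pretrained("<repo-id>")
model = AutoModelForCausalLM.from_pretrained("<repo-id>")

# Alpaca-style prompt, matching the examples shown above.
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nGive three tips for a good meal.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt")

# Sampling parameters quoted above: temperature 0.7, top_p 0.92, top_k 0.
output = model.generate(
    **inputs, do_sample=True, temperature=0.7, top_p=0.92, top_k=0, max_new_tokens=128
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```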
Ayham/roberta_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1-default results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Ayham/robertagpt2_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: creativeml-openrail-m tags: - text-to-image --- ### meryl-stryfe-20230408-17-adm-7k-4800-steps on Stable Diffusion via Dreambooth #### model by NickKolok This is the Stable Diffusion model fine-tuned on the meryl-stryfe-20230408-17-adm-7k-4800-steps concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **merylstryfetrigun** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
Ayham/xlmroberta_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-04-09T09:25:52Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: taxi-rl results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python # `load_from_hub` is the helper defined in the Deep RL Course notebooks; it downloads and unpickles the saved Q-table dictionary. model = load_from_hub(repo_id="vovikdrg/taxi-rl", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Ayham/xlmroberta_large_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
### vicuna-7b The repo contains the converted vicuna-7b model files. The base model is from `decapoda-research/llama-7b-hf` and the delta model is from `lmsys/vicuna-7b-delta-v0`. The conversion script is: ``` python3 -m fastchat.model.apply_delta \ --base decapoda-research/llama-7b-hf \ --target /output/path/to/vicuna-7b \ --delta lmsys/vicuna-7b-delta-v0 ``` tree -s vicuna-7b ``` vicuna-7b ├── [ 9976642750] pytorch_model-00001-of-00002.bin ├── [ 3500323731] pytorch_model-00002-of-00002.bin └── [ 499723] tokenizer.model ``` tree -s vicuna-7b-delta-v0 ``` vicuna-7b-delta-v0/ ├── [ 9976642750] pytorch_model-00001-of-00002.bin ├── [ 3500323731] pytorch_model-00002-of-00002.bin ├── [ 26788] pytorch_model.bin.index.json └── [ 499723] tokenizer.model ``` Although files with the same prefix have the same size, their contents differ. The md5sum results are: ``` 5e63e46f2547e75c209c79aebe21f5ca vicuna-7b/pytorch_model-00001-of-00002.bin 53a0d7b5e34c432401f1979b454272ee vicuna-7b/pytorch_model-00002-of-00002.bin 7ded4bec6556250247b1efbb3b7e77e8 vicuna-7b-delta-v0/pytorch_model-00001-of-00002.bin ee48ac060a0f352ba8fa3dea0385e4c3 vicuna-7b-delta-v0/pytorch_model-00002-of-00002.bin ``` The converted model has been tested using the Vicuna CLI and returned correct responses. So enjoy it~ ``` Q: hi how are you A: I'm just a computer program, so I don't have feelings or emotions like a human. I'm here to respond to your questions and provide information to the best of my ability. Is there anything else you'd like to know? Q: really cool A: Thank you! I'm glad you think so. Is there anything in particular you'd like to know or discuss? I'm here to help so feel free to ask me anything. ```
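Once converted, the merged weights should load like any local Hugging Face checkpoint. A minimal sketch, assuming the target directory from the conversion command above also contains the usual config and tokenizer files:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Path written by fastchat's apply_delta step above; adjust if you used a different target.
path = "/output/path/to/vicuna-7b"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)
```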
Ayham/xlnet_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.35 +/- 0.96 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Ayran/DialoGPT-medium-harry-potter-1-through-4-plus-6
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-04-09T10:05:33Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep8_lr2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep8_lr2 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0940 - Precision: 0.8489 - Recall: 0.8716 - F1: 0.8601 - Accuracy: 0.9771 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.0850 | 0.8142 | 0.8377 | 0.8257 | 0.9730 | | 0.1137 | 2.0 | 934 | 0.0799 | 0.8402 | 0.8534 | 0.8467 | 0.9757 | | 0.0746 | 3.0 | 1401 | 0.0825 | 0.8416 | 0.8614 | 0.8514 | 0.9765 | | 0.0588 | 4.0 | 1868 | 0.0863 | 0.8560 | 0.8652 | 0.8606 | 0.9769 | | 0.0482 | 5.0 | 2335 | 0.0885 | 0.8553 | 0.8646 | 0.8599 | 0.9771 | | 0.0402 | 6.0 | 2802 | 0.0893 | 0.8520 | 0.8668 | 0.8593 | 0.9776 | | 0.0362 | 7.0 | 3269 | 0.0916 | 0.8480 | 0.8726 | 0.8601 | 0.9772 | | 0.0336 | 8.0 | 3736 | 0.0940 | 0.8489 | 0.8716 | 0.8601 | 0.9771 | ### Framework versions - Transformers 4.27.4 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Ayta/Haha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
model-index:
- name: en_to_kjven_translator
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# en_to_kjven_translator

This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7384
- Bleu: 24.3137
- Gen Len: 18.018

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 16
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step  | Validation Loss | Bleu    | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 1.0945        | 1.0   | 2993  | 0.9622          | 21.6701 | 17.9817 |
| 0.9915        | 2.0   | 5986  | 0.8925          | 22.5111 | 17.9861 |
| 0.9306        | 3.0   | 8979  | 0.8543          | 22.9079 | 18.0025 |
| 0.889         | 4.0   | 11972 | 0.8257          | 23.2629 | 17.9987 |
| 0.8616        | 5.0   | 14965 | 0.8053          | 23.4706 | 18.0027 |
| 0.8412        | 6.0   | 17958 | 0.7886          | 23.6554 | 18.0092 |
| 0.811         | 7.0   | 20951 | 0.7769          | 23.8119 | 18.0106 |
| 0.8024        | 8.0   | 23944 | 0.7675          | 23.9582 | 18.0114 |
| 0.7898        | 9.0   | 26937 | 0.7583          | 24.0002 | 18.012  |
| 0.7802        | 10.0  | 29930 | 0.7538          | 24.0943 | 18.0156 |
| 0.7645        | 11.0  | 32923 | 0.7491          | 24.1756 | 18.0168 |
| 0.7565        | 12.0  | 35916 | 0.7445          | 24.2229 | 18.0158 |
| 0.7553        | 13.0  | 38909 | 0.7410          | 24.2709 | 18.0159 |
| 0.7454        | 14.0  | 41902 | 0.7399          | 24.2593 | 18.0205 |
| 0.7424        | 15.0  | 44895 | 0.7388          | 24.3007 | 18.0174 |
| 0.7461        | 16.0  | 47888 | 0.7384          | 24.3137 | 18.018  |

### Framework versions

- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
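A minimal inference sketch for the fine-tuned checkpoint, assuming a `text2text-generation` pipeline; the model path is a placeholder, and any task prefix used during fine-tuning would need to be prepended to the input:

```python
from transformers import pipeline

# hypothetical checkpoint path; substitute the actual repo id or a local directory
translator = pipeline("text2text-generation", model="en_to_kjven_translator")

# illustrative input; prepend the training-time task prefix here if one was used
print(translator("In the beginning God created the heavens and the earth.", max_length=64))
```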
Ayu/Shiriro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-09T10:06:44Z
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: pixelcopterV1
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Pixelcopter-PLE-v0
      type: Pixelcopter-PLE-v0
    metrics:
    - type: mean_reward
      value: 18.60 +/- 15.79
      name: mean_reward
      verified: false
---

# **Reinforce** Agent playing **Pixelcopter-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AyushPJ/ai-club-inductions-21-nlp-distilBERT
[ "pytorch", "distilbert", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: BERT_ep8_lr3
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# BERT_ep8_lr3

This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1048
- Precision: 0.7641
- Recall: 0.8235
- F1: 0.7927
- Accuracy: 0.9666

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-07
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log        | 1.0   | 467  | 0.1361          | 0.6936    | 0.7475 | 0.7195 | 0.9568   |
| 0.1814        | 2.0   | 934  | 0.1187          | 0.7168    | 0.7849 | 0.7493 | 0.9613   |
| 0.1202        | 3.0   | 1401 | 0.1118          | 0.7361    | 0.7990 | 0.7662 | 0.9635   |
| 0.1109        | 4.0   | 1868 | 0.1088          | 0.7508    | 0.8072 | 0.7780 | 0.9650   |
| 0.1006        | 5.0   | 2335 | 0.1069          | 0.7570    | 0.8158 | 0.7853 | 0.9657   |
| 0.0987        | 6.0   | 2802 | 0.1056          | 0.7604    | 0.8191 | 0.7887 | 0.9662   |
| 0.0969        | 7.0   | 3269 | 0.1050          | 0.7651    | 0.8224 | 0.7927 | 0.9665   |
| 0.0993        | 8.0   | 3736 | 0.1048          | 0.7641    | 0.8235 | 0.7927 | 0.9666   |

### Framework versions

- Transformers 4.27.4
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
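The hyperparameters above map directly onto a `transformers` `Trainer` configuration; a sketch under the assumption that the standard Trainer API was used, with `model`, `train_ds`, and `eval_ds` as placeholders:

```python
from transformers import Trainer, TrainingArguments

# a sketch of the reported configuration; model, train_ds, and eval_ds are placeholders
args = TrainingArguments(
    output_dir="BERT_ep8_lr3",
    learning_rate=5e-07,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=8,
    evaluation_strategy="epoch",  # evaluate once per epoch, as in the table above
)
trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
trainer.train()
```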
AyushPJ/ai-club-inductions-21-nlp-roBERTa
[ "pytorch", "roberta", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: BERT_ep8_lr4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# BERT_ep8_lr4

This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1754
- Precision: 0.6822
- Recall: 0.7097
- F1: 0.6957
- Accuracy: 0.9504

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-08
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log        | 1.0   | 467  | 0.2549          | 0.6812    | 0.6660 | 0.6735 | 0.9454   |
| 0.2775        | 2.0   | 934  | 0.2278          | 0.6778    | 0.6814 | 0.6796 | 0.9469   |
| 0.2438        | 3.0   | 1401 | 0.2086          | 0.6772    | 0.6935 | 0.6852 | 0.9481   |
| 0.2135        | 4.0   | 1868 | 0.1949          | 0.6780    | 0.7001 | 0.6889 | 0.9489   |
| 0.2058        | 5.0   | 2335 | 0.1856          | 0.6808    | 0.7057 | 0.6931 | 0.9496   |
| 0.1937        | 6.0   | 2802 | 0.1796          | 0.6810    | 0.7073 | 0.6939 | 0.9499   |
| 0.1818        | 7.0   | 3269 | 0.1763          | 0.6831    | 0.7097 | 0.6962 | 0.9503   |
| 0.1835        | 8.0   | 3736 | 0.1754          | 0.6822    | 0.7097 | 0.6957 | 0.9504   |

### Framework versions

- Transformers 4.27.4
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
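The precision, recall, F1, and accuracy columns are entity-level NER metrics; a sketch of how such numbers are commonly computed with the `seqeval` library (its use here is an assumption, and the tag sequences are illustrative):

```python
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score

# illustrative gold and predicted tag sequences, one inner list per sentence
y_true = [["O", "B-DISEASE", "I-DISEASE", "O"]]
y_pred = [["O", "B-DISEASE", "O", "O"]]

print(precision_score(y_true, y_pred))  # entity-level precision
print(recall_score(y_true, y_pred))     # entity-level recall
print(f1_score(y_true, y_pred))         # entity-level F1
print(accuracy_score(y_true, y_pred))   # token-level accuracy
```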
Azaghast/DistilBART-SCP-ParaSummarization
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "BartForConditionalGeneration" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 142, "min_length": 56, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-09T10:34:43Z
---
license: mit
language:
- en
---

[Trained](https://ahxxm.com/179.moew/) on images labeled by myself. 2-step inference:

```python
from pathlib import Path

from PIL import Image
import torch
import torch.nn as nn
import uform

path = Path("image.jpg")

# generate 768-dimension embeddings for an image
uf_model = uform.get_model("unum-cloud/uform-vl-english")
img = Image.open(path)
image_data = uf_model.preprocess_image(img)
text_data = uf_model.preprocess_text(path.name)  # filename not necessarily useful, but encode anyway
memb = uf_model.encode_multimodal(image=image_data, text=text_data)
memb = memb.detach().numpy()
assert memb.shape == (1, 768)

# load model, infer and Sigmoid
class ImageBinaryClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(768, 2048),
            nn.ReLU(),
            nn.Linear(2048, 2048),
            nn.ReLU(),
            nn.Linear(2048, 2048),
            nn.ReLU(),
            nn.Linear(2048, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )

    def forward(self, x):
        return self.layers(x)

saved_model = ImageBinaryClassifier()
saved_model.load_state_dict(torch.load("model.pt", map_location=torch.device("cpu")))
saved_model = torch.compile(saved_model)
saved_model.eval()

# the classifier expects a tensor, so convert the numpy embedding back
prob = float(torch.sigmoid(saved_model(torch.from_numpy(memb))))
```
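The sigmoid output can be read as the probability of the positive label; a common default is to threshold `prob` at 0.5 to obtain the binary decision, though the cutoff can be tuned on a labeled validation split.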
Azaghast/DistilBERT-SCP-Class-Classification
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
2023-04-09T10:35:48Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: BERT_ep8_lr5
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# BERT_ep8_lr5

This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2950
- Precision: 0.6748
- Recall: 0.6332
- F1: 0.6534
- Accuracy: 0.9420

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-09
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log        | 1.0   | 467  | 0.3067          | 0.6768    | 0.6258 | 0.6503 | 0.9415   |
| 0.2941        | 2.0   | 934  | 0.3029          | 0.6753    | 0.6283 | 0.6510 | 0.9417   |
| 0.2874        | 3.0   | 1401 | 0.2999          | 0.6764    | 0.6302 | 0.6525 | 0.9418   |
| 0.2821        | 4.0   | 1868 | 0.2978          | 0.6761    | 0.6316 | 0.6531 | 0.9420   |
| 0.2828        | 5.0   | 2335 | 0.2963          | 0.6749    | 0.6321 | 0.6528 | 0.9421   |
| 0.2829        | 6.0   | 2802 | 0.2954          | 0.6748    | 0.6332 | 0.6534 | 0.9421   |
| 0.2808        | 7.0   | 3269 | 0.2951          | 0.6750    | 0.6332 | 0.6535 | 0.9421   |
| 0.2841        | 8.0   | 3736 | 0.2950          | 0.6748    | 0.6332 | 0.6534 | 0.9420   |

### Framework versions

- Transformers 4.27.4
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
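The reported optimizer and schedule correspond to the following setup; a sketch assuming plain PyTorch `Adam` with the `transformers` linear schedule, where `model` is a placeholder for the fine-tuned network:

```python
import torch
from transformers import get_linear_schedule_with_warmup

# matches the reported settings; `model` is a placeholder
optimizer = torch.optim.Adam(model.parameters(), lr=5e-09, betas=(0.9, 0.999), eps=1e-08)
num_training_steps = 3736  # 8 epochs x 467 steps per epoch, from the table above
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
)
```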