Column schema (each record below repeats these seven fields, in this order):

  modelId        string, 4–81 chars
  tags           list
  pipeline_tag   string, one of 17 classes
  config         dict
  downloads      int64, 0–59.7M
  first_commit   timestamp[ns, tz=UTC]
  card           string, 51–438k chars
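A minimal sketch of loading and slicing a dump with this schema via the `datasets` library; the repository id below is a placeholder, not the actual source of this table:

```
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset this table was exported from.
ds = load_dataset("username/model-metadata", split="train")

row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
print(row["card"][:200])  # card bodies run up to ~438k characters

# e.g. keep only the conversational models
conversational = ds.filter(lambda r: r["pipeline_tag"] == "conversational")
```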
BigTooth/DialoGPT-Megumin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
16
2022-08-16T08:03:06Z
--- license: mit tags: - generated_from_trainer datasets: - naem1023/aihub-dialogue model-index: - name: bart-v2-dialouge results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comme...
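The record above is tagged `conversational` with a `GPT2LMHeadModel` architecture and `max_length: 1000` under `task_specific_params`. A minimal sketch of the usual DialoGPT-style chat turn with that model id, assuming the standard transformers causal-LM API:

```
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("BigTooth/DialoGPT-Megumin")
model = AutoModelForCausalLM.from_pretrained("BigTooth/DialoGPT-Megumin")

# DialoGPT-style models expect each turn terminated by the EOS token.
ids = tok.encode("Hello, who are you?" + tok.eos_token, return_tensors="pt")
out = model.generate(ids, max_length=1000, pad_token_id=tok.eos_token_id)
print(tok.decode(out[0, ids.shape[-1]:], skip_special_tokens=True))
```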
BigTooth/DialoGPT-small-tohru
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
10
2022-08-16T08:07:40Z
--- tags: - Issue_fixed - textattack - textclassification - entailment license: mit datasets: - mnli metrics: - accuracy --- Fixed label mapping issue for textattack/bert-base-uncased-MNLI, if using the original model, the predicted label has systematic confusion with the huggingface MNLI dataset. See the Github iss...
Bilz/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - metrics: - type: mean_reward value: 1176.34 +/- 238.60 name: mean_reward task: type: reinforcement-learning name:...
BinksSachary/ShaxxBot2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
12
null
--alpha_ce 0.0 --alpha_mlm 2.0 --alpha_cos 0.0 --alpha_act 1.0 --alpha_clm 0.0 --mlm \
Blazeolmo/Scrabunzi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- tags: - generated_from_trainer datasets: - squad_es model-index: - name: tiny-bert-qa-es results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-bert-qa-es T...
Blerrrry/Kkk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-16T10:01:27Z
This is the IndicBART model fine-tuned on the PMI and PIB dataset for XX to En translation. For detailed documentation look here: https://indicnlp.ai4bharat.org/indic-bart/ and https://github.com/AI4Bharat/indic-bart/ Usage: ``` from transformers import MBartForConditionalGeneration, AutoModelForSeq2SeqLM from transf...
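The usage snippet in the card above is cut off mid-import. A minimal reconstruction of the pattern those imports point to, based on the linked IndicBART documentation rather than the truncated card (the repo id, tokenizer flags, and `<2xx>` language tags are assumptions from those docs):

```
from transformers import MBartForConditionalGeneration, AutoTokenizer

# Repo id is an assumption -- the truncated card does not name it.
tok = AutoTokenizer.from_pretrained("ai4bharat/IndicBART",
                                    use_fast=False, keep_accents=True)
model = MBartForConditionalGeneration.from_pretrained("ai4bharat/IndicBART")

# IndicBART marks the language with <2xx> tags; here Hindi in, English out.
inp = tok("मैं एक लड़का हूँ </s> <2hi>", add_special_tokens=False,
          return_tensors="pt").input_ids
out = model.generate(inp, max_length=64,
                     decoder_start_token_id=tok.convert_tokens_to_ids("<2en>"))
print(tok.decode(out[0], skip_special_tokens=True))
```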
BlightZz/MakiseKurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
14
null
--- datasets: - squad_v2 language: en license: mit pipeline_tag: question-answering tags: - deberta - deberta-v3 model-index: - name: navteca/deberta-v3-base-squad2 results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squ...
BobBraico/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- license: mit tags: - generated_from_trainer datasets: - banking77 metrics: - accuracy model-index: - name: xlm-roberta-base-banking77-classification results: - task: name: Text Classification type: text-classification dataset: name: banking77 type: banking77 config: default ...
BonjinKim/dst_kor_bert
[ "pytorch", "jax", "bert", "pretraining", "transformers" ]
null
{ "architectures": [ "BertForPreTraining" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
5
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 193.41 +/- 23.10 name: mean_reward task: type: reinforcement-learning name: re...
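Cards like the one above (`library_name: stable-baselines3`, a PPO agent on LunarLander-v2 with a reported mean reward) are typically consumed with `huggingface_sb3`. A minimal sketch; the repo id and checkpoint filename are placeholders:

```
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id / filename -- SB3 repos ship a single zipped checkpoint.
path = load_from_hub(repo_id="username/ppo-LunarLander-v2",
                     filename="ppo-LunarLander-v2.zip")
model = PPO.load(path)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"{mean_reward:.2f} +/- {std_reward:.2f}")  # format of the card's metric
```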
BotterHax/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
8
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned...
Brinah/1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- widget: - text: "You've just won $1000. Contact now at +9211122233 to confirm the lottery!" example_title: "Example 1" - text: "Hello. Are you joining us for the party tonight?" example_title: "Example 2" - text: "On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black...
Brokette/projetCS
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - metrics: - type: mean_reward value: 231.98 +/- 16.70 name: mean_reward task: type: reinforcement-learning name: re...
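The Brokette/projetCS record above is tagged `automatic-speech-recognition` with a `Wav2Vec2ForCTC` architecture; a minimal sketch of transcribing audio with it through the transformers pipeline (the audio file is hypothetical):

```
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="Brokette/projetCS")
print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder file
```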
Brykee/DialoGPT-medium-Morty
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
10
null
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum This ...
BumBelDumBel/ZORK_AI_SCIFI
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
14
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-distilled-clinc results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then rem...
CAMeL-Lab/bert-base-arabic-camelbert-ca
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
580
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 183.96 +/- 75.63 name: mean_reward task: type: reinforcement-learning name: re...
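The CAMeLBERT record above is a `fill-mask` model (`BertForMaskedLM`); a minimal sketch of querying it, with an arbitrary Arabic prompt:

```
from transformers import pipeline

fill = pipeline("fill-mask", model="CAMeL-Lab/bert-base-arabic-camelbert-ca")
for pred in fill("الهدف من الحياة هو [MASK] ."):  # arbitrary example sentence
    print(pred["token_str"], round(pred["score"], 3))
```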
CAMeL-Lab/bert-base-arabic-camelbert-da
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
449
null
--- language: pl tags: - distilherbert --- ## distilHerBERT distilHerBERT-base is a BERT-based Language Model trained on Polish subset of [cc100](https://huggingface.co/datasets/cc100) dataset using Masked Language Modelling (MLM) and [distillation procedure](https://arxiv.org/abs/1910.01108) from model [HerBERT](http...
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
31
null
--- license: cc-by-4.0 language: mr datasets: - L3Cube-MahaCorpus --- ## MahaBERT MahaBERT is a Marathi BERT model. It is a multilingual BERT (google/muril-base-cased) model fine-tuned on L3Cube-MahaCorpus and other publicly available Marathi monolingual datasets. [dataset link] (https://github.com/l3cube-pune/Marath...
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-madar-twitter5
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
75
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config:...
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
71
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - f1 model-index: - name: results results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text me...
CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
26
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-large-xlsr-korean-demo-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> ...
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
7
null
--alpha_ce 0.0 --alpha_mlm 0.0 --alpha_cos 0.0 --alpha_act 1.0 --alpha_clm 0.0 --mlm \
CBreit00/DialoGPT_small_Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- tags: - feature-extraction - endpoints-template license: bsd-3-clause library_name: generic --- # Fork of [salesforce/BLIP](https://github.com/salesforce/BLIP) for a `feature-extraction` task on 🤗Inference endpoint. This repository implements a `custom` task for `feature-extraction` for 🤗 Inference Endpoints. The...
CL/safe-math-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum This ...
CLAck/indo-pure
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
4
null
--- tags: - audio - spectrograms datasets: - teticio/audio-diffusion-256 --- De-noising Diffusion Probabilistic Model trained on [teticio/audio-diffusion-256](https://huggingface.co/datasets/teticio/audio-diffusion-256) to generate mel spectrograms of 256x256 corresponding to 5 seconds of audio. The code to convert fro...
CLAck/vi-en
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
6
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu model-index: - name: t5-small-finetuned-text2log-finetuned-nl-to-fol-finetuned-nl-to-fol-finetuned-nl-to-fol-version2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. Yo...
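The CLAck/vi-en record above is a Marian translation model tagged with `en` and `vi`; a minimal sketch, assuming the Vietnamese-to-English direction implied by the repo name:

```
from transformers import MarianMTModel, MarianTokenizer

tok = MarianTokenizer.from_pretrained("CLAck/vi-en")
model = MarianMTModel.from_pretrained("CLAck/vi-en")

batch = tok(["Tôi là sinh viên."], return_tensors="pt", padding=True)  # "I am a student."
out = model.generate(**batch)
print(tok.batch_decode(out, skip_special_tokens=True))
```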
CLEE/CLEE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: defau...
CLTL/gm-ner-xlmrbase
[ "pytorch", "tf", "xlm-roberta", "token-classification", "nl", "transformers", "dighum", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
2
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: finetuning-sentiment-model-3000-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove...
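The CLTL/gm-ner-xlmrbase record above is a Dutch `token-classification` model; a minimal sketch of running it with entity aggregation (the sample sentence is arbitrary):

```
from transformers import pipeline

ner = pipeline("token-classification", model="CLTL/gm-ner-xlmrbase",
               aggregation_strategy="simple")
print(ner("Vincent van Gogh werd geboren in Zundert."))
```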
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": nul...
35
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 285.40 +/- 14.55 name: mean_reward task: type: reinforcement-learning name: re...
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
33
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: distilbert-base-future results: [] widget: - text: "We will have a good time." example_title: "Positive" - text: "We had a good time." example_title: "Negative" --- # distilbert-base-future ## Table of Contents - [Model descripti...
CLTL/icf-levels-att
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
32
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - cuad model-index: - name: bert-small-finetuned-cuad-full-longer results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove t...
CLTL/icf-levels-ber
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
33
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers language: - ko license: mit --- # smartmind/ko-sbert-augSTS-maxlength512 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional den...
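The card above describes a sentence-transformers model mapping text to 768-dimensional vectors; a minimal sketch of the encode call it refers to:

```
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("smartmind/ko-sbert-augSTS-maxlength512")
emb = model.encode(["안녕하세요", "반갑습니다"])  # arbitrary Korean sentences
print(emb.shape)  # (2, 768) per the card's description
```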
CLTL/icf-levels-enr
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
30
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: QRDQN results: - metrics: - type: mean_reward value: 3510.00 +/- 4506.87 name: mean_reward task: type: reinforcement-learn...
CLTL/icf-levels-etn
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
31
null
--- tags: - generated_from_keras_callback model-index: - name: Mostafa3zazi/arabicQA-finetuned-squad_arcd results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Mostafa3zaz...
CM-CA/DialoGPT-small-cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-tw-small results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then rem...
CNT-UPenn/Bio_ClinicalBERT_for_seizureFreedom_classification
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
28
null
--- tags: - generated_from_keras_callback model-index: - name: arabicQA-finetuned-squad_arcd_manual_push results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # arabicQA-fin...
CNT-UPenn/RoBERTa_for_seizureFrequency_QA
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
5
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.05 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
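The CNT-UPenn/RoBERTa_for_seizureFrequency_QA record above is an extractive `question-answering` model; a minimal sketch (question and context are invented for illustration):

```
from transformers import pipeline

qa = pipeline("question-answering",
              model="CNT-UPenn/RoBERTa_for_seizureFrequency_QA")
result = qa(question="How often does the patient have seizures?",
            context="The patient reports roughly two seizures per month since 2019.")
print(result["answer"], result["score"])
```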
CSResearcher/TestModel
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-test2 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
CSZay/bart
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.1 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name...
CTBC/ATS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.5 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name...
CZWin32768/xlm-align
[ "pytorch", "xlm-roberta", "fill-mask", "arxiv:2106.06381", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repe...
6
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b1 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name: ...
Caddy/UD
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.01 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
Callidior/bert2bert-base-arxiv-titlegen
[ "pytorch", "safetensors", "encoder-decoder", "text2text-generation", "en", "dataset:arxiv_dataset", "transformers", "summarization", "license:apache-2.0", "autotrain_compatible", "has_space" ]
summarization
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
145
null
--- license: apache-2.0 tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-ks-padpt400 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofre...
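The Callidior/bert2bert-base-arxiv-titlegen record above is an encoder-decoder tagged `summarization` (arXiv abstract-to-title generation); a minimal sketch, with a placeholder abstract:

```
from transformers import pipeline

titler = pipeline("summarization", model="Callidior/bert2bert-base-arxiv-titlegen")
abstract = "We propose a method for ..."  # placeholder abstract text
print(titler(abstract, max_length=32)[0]["summary_text"])
```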
Cameron/BERT-SBIC-offensive
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
31
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-large-cased-finetuned-fce results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert...
Cameron/BERT-SBIC-targetcategory
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
30
null
--- license: mit tags: - generated_from_trainer datasets: - squad_v2 - quoref - adversarial_qa - duorc model-index: - name: rob-base-superqa2 results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squad_v2 split: vali...
Cameron/BERT-eec-emotion
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
36
null
--- tags: - generated_from_trainer model-index: - name: koBERT-finetuned-wholemasking20 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # koBERT-finetuned-wholem...
Cameron/BERT-mdgender-convai-binary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
33
null
--- license: apache-2.0 tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-ks-padpt800 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofre...
Cameron/BERT-mdgender-convai-ternary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
38
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default...
Camzure/MaamiBot-test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
9
2022-08-17T05:48:30Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b2 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name: ...
Camzure/MaamiBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-17T05:52:41Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b10 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name:...
Canadiancaleb/DialoGPT-small-jesse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
9
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b20 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name:...
Canadiancaleb/DialoGPT-small-walter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
13
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b50 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name:...
Canadiancaleb/jessebot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-17T05:57:15Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b100 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name...
Canyonevo/DialoGPT-medium-KingHenry
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-17T05:57:18Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b5 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name: ...
Capreolus/birch-bert-large-msmarco_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
1
2022-08-17T06:31:07Z
--- license: cc-by-nc-sa-3.0 --- # KhanomTan TTS v1.0 KhanomTan TTS (ขนมตาล) is an open-source Thai text-to-speech model that supports multilingual speakers such as Thai, English, and others. KhanomTan TTS is a YourTTS model trained on multilingual languages that supports Thai. We use Thai speech corpora, TSync 1* a...
Capreolus/electra-base-msmarco
[ "pytorch", "tf", "electra", "text-classification", "arxiv:2008.09093", "transformers" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
110
null
--- annotations_creators: [] language: - ro language_creators: - machine-generated license: - apache-2.0 multilinguality: - monolingual pretty_name: BlackKakapo/t5-small-paraphrase-ro size_categories: - 10K<n<100K source_datasets: - original tags: [] task_categories: - text2text-generation task_ids: [] --- # Romanian ...
Captain-1337/CrudeBERT
[ "pytorch", "bert", "text-classification", "arxiv:1908.10063", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
28
2022-08-17T06:37:49Z
--- license: apache-2.0 tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-ks-padpt1600 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofr...
CarlosPR/mt5-spanish-memmories-analysis
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat...
7
2022-08-17T07:12:14Z
--- language: en tags: - t5 datasets: - squad license: mit --- # Question Generation Model ## Github https://github.com/Seoneun/T5-Question-Generation ## Fine-tuning Dataset SQuAD 1.1 | Train Data | Dev Data | Test Data | | ------ | ------ | ------ | | 75,722 | 10,570 | 11,877 | ## Demo https://huggingface.co/S...
CarlosTron/Yo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: de widget: - text: "[Title_nullsechsroy feat. YFG Pave_" tags: - Text Generation datasets: - genius lyrics license: mit --- # GPT-Rapgenerator The Rapgenerator is trained for [nullsechsroy](https://genius.com/artists/Nullsechsroy) on [german-poetry-gpt2](https://huggingface.co/Anjoe/german-poetry-gpt2...
CasualHomie/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
11
2022-08-17T07:29:21Z
--- license: apache-2.0 tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-ks-padpt3200 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofr...
Cathy/reranking_model
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
27
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - embedding-data/sentence-compression --- # edumunozsala/distilroberta-sentence-transformer-test This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & p...
Cedille/fr-boris
[ "pytorch", "gptj", "text-generation", "fr", "dataset:c4", "arxiv:2202.03371", "transformers", "causal-lm", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPTJForCausalLM" ], "model_type": "gptj", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
401
2022-08-17T07:43:29Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.03 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
dccuchile/albert-base-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no...
34
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.04 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
dccuchile/albert-base-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
14
null
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b0.75 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
dccuchile/albert-base-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no...
25
2022-08-17T07:48:27Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b1.25 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - nam...
dccuchile/albert-base-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
5
2022-08-17T07:48:45Z
--- language: - en - ro license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: distilled-mt5-small-b1.5 results: - task: name: Translation type: translation dataset: name: wmt16 ro-en type: wmt16 args: ro-en metrics: - name...
dccuchile/albert-large-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
3
2022-08-17T08:27:19Z
jeremy sits and reads an imaginary book even though jeremy is actually the imaginary friend of a horse ghost
dccuchile/albert-large-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no...
25
null
--- license: apache-2.0 tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: wav2vec2-base-ks-ept4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread a...
dccuchile/albert-tiny-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
8
null
--- license: apache-2.0 tags: - translation - generated_from_trainer model-index: - name: nils-nl-to-rx-pt-v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # n...
dccuchile/albert-xxlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
3
null
Access to model Thantiwa/Thaitune_eiei is restricted and you are not in the authorized list. Visit https://huggingface.co/Thantiwa/Thaitune_eiei to ask for access.
dccuchile/bert-base-spanish-wwm-cased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
5
null
--- language: en thumbnail: http://www.huggingtweets.com/apesahoy-discoelysiumbot-jzux/1660737778768/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; marg...
dccuchile/distilbert-base-spanish-uncased-finetuned-mldoc
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
27
2022-08-17T12:22:02Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-small-finetuned-wnut17-ner-longer10 results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_...
Chae/botman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
5
2022-08-17T13:17:39Z
--- language: - en --- # Maverick <br> Developed during my internship at [**Vela Partners**](https://vela.partners/) as a Machine Learning Engineer. <br> The paper presenting Maverick can be found on my [GitHub](https://github.com/lukasec/Maverick). <br> Maverick consists of two sub-models published here on Hugging F...
Cheatham/xlm-roberta-large-finetuned-r01
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
23
2022-08-17T15:10:23Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, the...
Ci/Pai
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-17T20:01:32Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: train_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_model This model...
Cinnamon/electra-small-japanese-generator
[ "pytorch", "electra", "fill-mask", "ja", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngra...
19
null
--- license: apache-2.0 language: code datasets: - codeparrot/codecomplex --- This is a fine-tuned version of [UniXcoder](https://huggingface.co/microsoft/unixcoder-base-nine), a unified cross-modal pre-trained model for programming languages, on [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex), a ...
CoShin/XLM-roberta-large_ko_en_nil_sts
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-18T00:55:49Z
--- tags: - conversational --- # Spike Spiegel DialoGPT Model
Craig/mGqFiPhu
[ "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
feature-extraction
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2022-08-20T17:11:20Z
--- tags: - generated_from_trainer model-index: - name: chinese-pert-large-finetuned-med-zh results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # chinese-pert-large-...
DTAI-KULeuven/mbert-corona-tweets-belgium-topics
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Dutch", "French", "English", "Tweets", "Topic classification" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
167
null
This is random-wav2vec2-base, an unpretrained version of wav2vec 2.0. The weight of this model is randomly initialized, and can be used for establishing randomized baselines or training a model from scratch. The code used to do so is adapted from: https://huggingface.co/saibo/random-roberta-base.
DaWang/demo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image inference: false extra_gated_prompt: |- This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1....
Daiki/scibert_scivocab_uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: finetuned-marktextepoch-n800 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetu...
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repe...
29
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-large-xlsr-korean-demo-test2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> ...
Declan/ChicagoTribune_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
7
null
--- tags: - conversational --- #Rick & Morty DialoGPT Model
Declan/ChicagoTribune_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
5
null
--- language: en thumbnail: http://www.huggingtweets.com/jeffreykofman/1660909090300/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; w...
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
9
null
--- tags: - autotrain - text-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - sasha/autotrain-data-BERTBase-TweetEval co2_eq_emissions: emissions: 0.04868905658915141 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1281249000 - CO2 Emissions...
Declan/NPR_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
3
null
--- tags: - autotrain - text-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - sasha/autotrain-data-RobertaBaseTweetEval co2_eq_emissions: emissions: 28.053963781460215 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1281048989 - CO2 Emission...
DeepBasak/Slack
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
# RNA_Project # Final Project - Connectionist Predictive Models ### Student - Caio Emanoel Serpa Lopes ### Tutor - Vitor Casadei --- |**Project Type**|**Selected Model**|**Language**| |--|--|--| |Image Classification|MobileNetV2|Tensorflow| [Click here to run the model in the browser (roboflow)](https:...
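The project card above names MobileNetV2 in Tensorflow for image classification but does not include the author's notebook; below is a generic transfer-learning sketch under common assumptions (224x224 RGB inputs, a hypothetical `num_classes`), not the project's actual code.

```python
import tensorflow as tf

num_classes = 3  # hypothetical; set to the project's actual class count

# Reuse ImageNet features from MobileNetV2 and train only a small softmax head.
base = tf.keras.applications.MobileNetV2(
    include_top=False, pooling="avg", input_shape=(224, 224, 3), weights="imagenet"
)
base.trainable = False  # freeze the pretrained backbone

model = tf.keras.Sequential([
    base,
    tf.keras.layers.Dense(num_classes, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
```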
DeividasM/wav2vec2-large-xlsr-53-lithuanian
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "lt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
7
null
--- license: cc-by-4.0 language: hi --- ## HindAlBERT HindAlBERT is a Hindi AlBERT model trained on publicly available Hindi monolingual datasets. [project link](https://github.com/l3cube-pune/MarathiNLP) More details on the dataset, models, and baseline results can be found in our [<a href='https://arxiv.org...
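The HindAlBERT card above and the HindBERT cards below describe Hindi masked-language models; a minimal fill-mask sketch follows. The model id is a labeled placeholder, since the cards point to the l3cube-pune project page rather than naming the exact hub repo.

```python
from transformers import pipeline

# Placeholder id; look up the exact repo on the project's hub page.
model_id = "l3cube-pune/hindi-albert"  # hypothetical

fill_mask = pipeline("fill-mask", model=model_id)
masked = f"भारत एक {fill_mask.tokenizer.mask_token} देश है।"  # "India is a [MASK] country."
for pred in fill_mask(masked):
    print(pred["token_str"], round(pred["score"], 3))
```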
DeltaHub/adapter_t5-3b_cola
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
3
null
--- license: cc-by-4.0 language: hi --- ## HindBERT HindBERT is a Hindi BERT model. It is a multilingual BERT (bert-base-multilingual-cased) model fine-tuned on publicly available Hindi monolingual datasets. [project link](https://github.com/l3cube-pune/MarathiNLP) More details on the dataset, models, and baseline ...
DeltaHub/adapter_t5-3b_mrpc
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
3
null
--- license: cc-by-4.0 language: hi --- ## HindBERT HindBERT is a Hindi BERT model. It is a multilingual BERT (google/muril-base-cased) model fine-tuned on publicly available Hindi monolingual datasets. [project link](https://github.com/l3cube-pune/MarathiNLP) More details on the dataset, models, and baseline resul...
DeltaHub/adapter_t5-3b_qnli
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
3
null
--- language: - hi - mr - multilingual license: cc-by-4.0 --- ## DevRoBERTa DevRoBERTa is a Devanagari RoBERTa model. It is a multilingual RoBERTa (xlm-roberta-base) model fine-tuned on publicly available Hindi and Marathi monolingual datasets. [project link](https://github.com/l3cube-pune/MarathiNLP) More details ...
Deniskin/emailer_medium_300
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
14
2022-08-19T19:19:20Z
--- language: - hi - mr - multilingual license: cc-by-4.0 --- ## DevAlBERT DevAlBERT is a Devanagari AlBERT model trained on publicly available Hindi and Marathi monolingual datasets. [project link](https://github.com/l3cube-pune/MarathiNLP) More details on the dataset, models, and baseline results can be fou...
Deniskin/essays_small_2000
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: en tags: - pythae - reproducibility license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from pythae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path...
Deniskin/essays_small_2000i
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- language: en tags: - pythae - reproducibility license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from pythae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path...
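The two pythae snippets above are cut off mid-call by the dump, so the hub path they pass is unknown; a completed version of the same load pattern follows, with the path left as a labeled placeholder rather than guessed.

```python
from pythae.models import AutoModel

# hf_hub_path is a placeholder: the dump truncates the real repo id.
model = AutoModel.load_from_hf_hub(hf_hub_path="<user>/<pythae-model-repo>")
print(type(model).__name__)  # the concrete pythae model class that was saved
```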
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PixelCopter results: - metrics: - type: mean_reward value: 7.70 +/- 11.04 name: mean_reward task: type: reinforcement-learning name: reinforcemen...
DeskDown/MarianMixFT_en-fil
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
3
null
--- language: en tags: - pythae - reproducibility license: apache-2.0 --- This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from pythae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="clementchadebec/reproduced_rae_gp") ``...
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
3
null
--- language: en tags: - pythae - reproducibility license: apache-2.0 --- This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from pythae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="clementchadebec/reproduced_rae_l2") ``...
DeskDown/MarianMixFT_en-ja
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
9
null
--- language: en tags: - pythae - reproducibility license: apache-2.0 --- This model was trained with pythae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from pythae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="clementchadebec/reproduced_svae") ``` ...
DeskDown/MarianMixFT_en-ms
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
5
null
--- title: README emoji: 🏃 colorFrom: gray colorTo: purple sdk: static pinned: false --- # Model Description TinyBioBERT is a distilled version of [BioBERT](https://huggingface.co/dmis-lab/biobert-base-cased-v1.2?text=The+goal+of+life+is+%5BMASK%5D.), distilled for 100k training steps using a total batch ...
DeskDown/MarianMixFT_en-vi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
5
null
--- library_name: stable-baselines3 tags: - CartPole-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: QRDQN results: - metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: rein...
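The card above reports a QRDQN agent solving CartPole-v1 (mean reward 500.00 +/- 0.00); QRDQN lives in sb3-contrib rather than core stable-baselines3. A minimal training sketch follows, using default hyperparameters rather than the uploaded agent's exact settings.

```python
from sb3_contrib import QRDQN

# Quantile-Regression DQN on CartPole-v1; hyperparameters here are library
# defaults, not the ones used for the uploaded checkpoint.
model = QRDQN("MlpPolicy", "CartPole-v1", verbose=0)
model.learn(total_timesteps=10_000)
model.save("qrdqn_cartpole")  # hypothetical local filename
```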
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
1
null
--- tags: - autotrain - text-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - aujer/autotrain-data-not_interested_8_19 co2_eq_emissions: emissions: 7.7092029324718965 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1283149075 - CO2 Emissions ...