Dataset schema: each record below lists these eight fields, in this order (long values are truncated in this preview):

  modelId        string (4-81 chars)
  tags           list of strings
  pipeline_tag   string (17 distinct values; null for some records)
  config         dict
  downloads      int64 (0-59.7M)
  first_commit   timestamp[ns, tz=UTC] (null for some records)
  card           string (51-438k chars; the model card text)
  embedding      list of floats
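To make the record layout concrete, here is a minimal sketch of loading and inspecting one record, assuming the dump comes from a Hugging Face dataset repository; the id "user/models-with-embeddings" and the "train" split are placeholders, not the actual dataset coordinates.

```python
# Minimal sketch: load the dataset and inspect one record.
# "user/models-with-embeddings" is a placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/models-with-embeddings", split="train")

row = ds[0]
print(row["modelId"])         # e.g. "albert-base-v1"
print(row["pipeline_tag"])    # one of 17 task strings, e.g. "fill-mask"; None for some records
print(row["downloads"])       # download count
print(row["first_commit"])    # UTC timestamp; None for some records
print(len(row["embedding"]))  # dimensionality of the card-embedding vector
```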
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
38,156
2019-12-20T12:28:51Z
--- tags: - exbert language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Base v1 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github...
[ -0.01628965139389038, 0.004767254460602999, -0.02520707994699478, 0.06764831393957138, 0.03774816170334816, 0.02252449281513691, -0.016135185956954956, -0.03889353945851326, -0.03518366441130638, 0.058156222105026245, 0.02774147316813469, -0.0001626275625312701, 0.00031815734109841287, 0.0...
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
4,785,283
2019-11-04T16:00:52Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Base v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-rese...
[ -0.017371734604239464, 0.0015233331359922886, -0.02679620310664177, 0.06894268840551376, 0.035669613629579544, 0.021609894931316376, -0.017564035952091217, -0.037039611488580704, -0.03948637843132019, 0.05711784213781357, 0.0304550938308239, 0.0007140071247704327, 0.004628319758921862, 0.0...
albert-large-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
687
2019-12-20T12:28:51Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Large v1 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-res...
[ -0.018121063709259033, 0.002527142409235239, -0.026467183604836464, 0.06899815797805786, 0.035836223512887955, 0.019501175731420517, -0.017295489087700844, -0.03910917416214943, -0.03756653517484665, 0.05714162811636925, 0.030745433643460274, 0.0013536266051232815, 0.004762119147926569, 0....
albert-large-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
26,792
2019-11-04T16:00:53Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Large v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-res...
[ -0.017891714349389076, 0.002103835577145219, -0.025990311056375504, 0.0689493864774704, 0.036186132580041885, 0.019427411258220673, -0.017666058614850044, -0.03851858526468277, -0.03691915050148964, 0.0569012388586998, 0.03065728396177292, 0.002100828569382429, 0.00448606489226222, 0.03260...
albert-xlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
341
2019-12-20T12:28:51Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT XLarge v1 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-re...
[ -0.017884569242596626, 0.006399250589311123, -0.020775070413947105, 0.06745092570781708, 0.03645424172282219, 0.019362691789865494, -0.020082443952560425, -0.04499386250972748, -0.031038735061883926, 0.05520254373550415, 0.03186742216348648, -0.0028629458975046873, 0.0012352790217846632, 0...
albert-xlarge-v2
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
2,973
2019-11-04T16:00:53Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT XLarge v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-re...
[ -0.017797056585550308, 0.005246325396001339, -0.020524784922599792, 0.06777287274599075, 0.03693637624382973, 0.019691655412316322, -0.020058730617165565, -0.043487515300512314, -0.03131895139813423, 0.055198222398757935, 0.031354181468486786, -0.0017217992572113872, 0.0013881685445085168, ...
albert-xxlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
7,091
2019-12-20T12:28:51Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT XXLarge v1 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-r...
[ -0.02078196220099926, 0.006141583435237408, -0.0204166267067194, 0.0693066269159317, 0.036629922688007355, 0.01839315891265869, -0.018037041649222374, -0.04531469568610191, -0.03084694966673851, 0.05282839015126228, 0.03224258869886398, -0.0016929764533415437, 0.0014640215085819364, 0.0344...
albert-xxlarge-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_...
42,640
2019-11-04T16:00:52Z
--- tags: - exbert language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT XXLarge v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://gith...
[ -0.019903937354683876, 0.008878600783646107, -0.02040528692305088, 0.06702258437871933, 0.03765907511115074, 0.02060181088745594, -0.01716987043619156, -0.045623209327459335, -0.027434248477220535, 0.053571511059999466, 0.029728639870882034, -0.0004125060513615608, -0.001796863623894751, 0...
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
8,621,271
2018-11-14T23:35:08Z
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT base model (cased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https:...
[ -0.005537884775549173, 0.0068740639835596085, -0.01787860319018364, 0.06503400951623917, 0.026847530156373978, 0.033723000437021255, -0.01929895021021366, -0.03633744642138481, -0.03174727410078049, 0.04941345006227493, 0.015982916578650475, -0.005766472313553095, 0.016170065850019455, 0.0...
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
3,377,486
2018-11-14T23:35:08Z
--- language: zh --- # Bert-base-chinese ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [How to Get Started With the Model](#how-to-get-started-with-the-model) ## Model Deta...
[ -0.027363037690520287, -0.012506258673965931, -0.0041877892799675465, 0.06901980191469193, 0.015909267589449883, 0.01630205661058426, -0.006000135093927383, -0.02104058675467968, -0.01659931242465973, 0.05400661751627922, -0.006732940208166838, -0.02970108948647976, 0.032426174730062485, 0...
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
175,983
2019-06-18T09:14:06Z
--- language: de license: mit thumbnail: https://static.tildacdn.com/tild6438-3730-4164-b266-613634323466/german_bert.png tags: - exbert --- <a href="https://huggingface.co/exbert/?model=bert-base-german-cased"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a> # German BERT ![bert_im...
[ -0.004065467044711113, 0.0007318712305277586, -0.0237592626363039, 0.0700923278927803, 0.03437672182917595, 0.029773039743304253, -0.0022655632346868515, -0.024777282029390335, -0.024793291464447975, 0.06250905990600586, -0.00813546497374773, -0.00507994694635272, 0.018282661214470863, 0.0...
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
1,814
2019-09-25T16:48:39Z
--- language: de license: mit --- This model is the same as [dbmdz/bert-base-german-cased](https://huggingface.co/dbmdz/bert-base-german-cased). See the [dbmdz/bert-base-german-cased model card](https://huggingface.co/dbmdz/bert-base-german-cased) for details on the model.
[ -0.048520587384700775, -0.01363783422857523, -0.021567007526755333, 0.04094906896352768, 0.030024362727999687, 0.02876911871135235, -0.017450902611017227, -0.015293032862246037, -0.03592192009091377, 0.055546943098306656, 0.011228273622691631, -0.02746684104204178, 0.02676054835319519, 0.0...
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
68,305
2019-09-25T16:50:02Z
--- language: de license: mit --- This model is the same as [dbmdz/bert-base-german-uncased](https://huggingface.co/dbmdz/bert-base-german-uncased). See the [dbmdz/bert-base-german-uncased model card](https://huggingface.co/dbmdz/bert-base-german-uncased) for details on the model.
[ -0.04549082741141319, -0.015838054940104485, -0.02329784259200096, 0.03793869912624359, 0.026241527870297432, 0.02944178320467472, -0.01881193369626999, -0.014970763586461544, -0.03462523967027664, 0.05449669808149338, 0.015074321068823338, -0.028820615261793137, 0.02868707664310932, 0.037...
bert-base-multilingual-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", ...
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
4,749,504
2018-11-30T13:36:24Z
--- language: - multilingual - af - sq - ar - an - hy - ast - az - ba - eu - bar - be - bn - inc - bs - br - bg - my - ca - ceb - ce - zh - cv - hr - cs - da - nl - en - et - fi - fr - gl - ka - de - el - gu - ht - he - hi - hu - is - io - id - ga - it - ja - jv - kn - kk - ky - ko - la - lv - lt - roa - nds - lm - mk...
[ -0.01246543601155281, -0.013974811881780624, -0.011870586313307285, 0.06439980119466782, 0.026871293783187866, 0.022756872698664665, 0.013229860924184322, -0.00970689207315445, -0.041041050106287, 0.044759251177310944, -0.003977108281105757, -0.03387429937720299, 0.01188259944319725, 0.020...
bert-base-multilingual-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", ...
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
328,585
2018-11-30T13:36:23Z
--- language: - multilingual - af - sq - ar - an - hy - ast - az - ba - eu - bar - be - bn - inc - bs - br - bg - my - ca - ceb - ce - zh - cv - hr - cs - da - nl - en - et - fi - fr - gl - ka - de - el - gu - ht - he - hi - hu - is - io - id - ga - it - ja - jv - kn - kk - ky - ko - la - lv - lt - roa - nds - lm - mk...
[ -0.0103159099817276, -0.01097841840237379, -0.011103062890470028, 0.06341356784105301, 0.02764466032385826, 0.02311810851097107, 0.013504192233085632, -0.01322674099355936, -0.03739003464579582, 0.04429417848587036, 0.00044902466470375657, -0.03863493353128433, 0.011513454839587212, 0.0206...
bert-base-uncased
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
59,663,489
2018-11-14T23:35:08Z
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT base model (uncased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](http...
[ -0.0042279125191271305, 0.0030666447710245848, -0.018318351358175278, 0.06348302215337753, 0.02924499846994877, 0.03182365372776985, -0.019427748396992683, -0.03530840948224068, -0.028829434886574745, 0.049393828958272934, 0.017601648345589638, -0.007109126076102257, 0.017407912760972977, ...
bert-large-cased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
8,214
2019-06-18T21:49:26Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (cased) whole word masking finetuned on SQuAD Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in ...
[ -0.012906759046018124, 0.0037411816883832216, -0.013025769963860512, 0.05689029395580292, 0.022575486451387405, 0.026862403377890587, -0.017131082713603973, -0.02745896764099598, -0.028134431689977646, 0.047932159155607224, 0.008765504695475101, 0.0010784948244690895, 0.011876181699335575, ...
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
2,316
2019-06-15T21:59:11Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (cased) whole word masking Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](h...
[ -0.013023896142840385, 0.004246004857122898, -0.013001266866922379, 0.056599222123622894, 0.022739851847290993, 0.02895577996969223, -0.0168935414403677, -0.03000759333372116, -0.026967283338308334, 0.048546336591243744, 0.010901414789259434, -0.0011269774986431003, 0.011947228573262691, 0...
bert-large-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
388,769
2018-11-30T13:36:23Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (cased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/g...
[ -0.007263294421136379, -0.0025373364333063364, -0.016460983082652092, 0.05909114331007004, 0.027586117386817932, 0.03554476425051689, -0.02039707824587822, -0.03891567885875702, -0.028000840917229652, 0.052542995661497116, 0.019628992304205894, -0.008350742049515247, 0.02015450783073902, 0...
bert-large-uncased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
480,510
2019-06-18T13:41:43Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (uncased) whole word masking finetuned on SQuAD Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released i...
[ -0.012540115974843502, 0.0044858637265861034, -0.014498109929263592, 0.05772971734404564, 0.021730072796344757, 0.02691120281815529, -0.0164218470454216, -0.02643810398876667, -0.027705954387784004, 0.04678012430667877, 0.01313548069447279, 0.0011332540307193995, 0.012354725040495396, 0.03...
bert-large-uncased-whole-word-masking
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
76,685
2019-06-17T07:55:04Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (uncased) whole word masking Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository]...
[ -0.012554420158267021, 0.004546852316707373, -0.013807971030473709, 0.056908298283815384, 0.021865714341402054, 0.028806159272789955, -0.01661727949976921, -0.029373949393630028, -0.02641567587852478, 0.04809326305985451, 0.014314915984869003, -0.0007713751401752234, 0.012661212123930454, ...
bert-large-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
1,058,496
2018-11-14T23:35:08Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (uncased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com...
[ -0.006085440516471863, 0.0009613729198463261, -0.01668190397322178, 0.06285299360752106, 0.028104914352297783, 0.030963823199272156, -0.020126869902014732, -0.03348826244473457, -0.030550867319107056, 0.049342453479766846, 0.01767853833734989, -0.00400198670104146, 0.01777447946369648, 0.0...
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_...
1,440,898
2019-11-16T04:17:25Z
--- language: fr license: mit datasets: - oscar --- # CamemBERT: a Tasty French Language Model ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations, and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Citation Information](#citation...
[ -0.020097097381949425, -0.016600843518972397, -0.00538646150380373, 0.05541989207267761, 0.017579155042767525, 0.013426127843558788, -0.028852131217718124, -0.01814541406929493, -0.025296609848737717, 0.06960288435220718, 0.012533331289887428, -0.026998119428753853, 0.017302917316555977, 0...
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
257,745
2020-02-07T19:16:00Z
--- language: en license: apache-2.0 datasets: - squad metrics: - squad model-index: - name: distilbert-base-cased-distilled-squad results: - task: type: question-answering name: Question Answering dataset: name: squad type: squad config: plain_text split: validation metr...
[ 0.005515695549547672, -0.03346019238233566, -0.024390308186411858, 0.0433480404317379, 0.06918568909168243, 0.012284352444112301, -0.024527348577976227, 0.002197678666561842, -0.0418853834271431, 0.03237953409552574, 0.03491295129060745, -0.011495656333863735, 0.015988508239388466, 0.05048...
distilbert-base-cased
[ "pytorch", "tf", "onnx", "distilbert", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1910.01108", "transformers", "license:apache-2.0", "has_space" ]
null
{ "architectures": null, "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "n...
574,859
2020-02-07T19:16:00Z
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # Model Card for DistilBERT base model (cased) This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-cased). It was introduced in [this paper](https://arxiv.org/abs/1910.01108). The code for the distillat...
[ -0.010380462743341923, -0.00014546641614288092, -0.0382581502199173, 0.05683795362710953, 0.027643902227282524, 0.03689701110124588, -0.015173140913248062, -0.02950618974864483, -0.04597385227680206, 0.06255622208118439, 0.021582774817943573, -0.006572623271495104, 0.002080748789012432, 0....
distilbert-base-multilingual-cased
[ "pytorch", "tf", "onnx", "safetensors", "distilbert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", ...
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repea...
8,339,633
2019-11-25T19:22:20Z
--- language: - multilingual - af - sq - ar - an - hy - ast - az - ba - eu - bar - be - bn - inc - bs - br - bg - my - ca - ceb - ce - zh - cv - hr - cs - da - nl - en - et - fi - fr - gl - ka - de - el - gu - ht - he - hi - hu - is - io - id - ga - it - ja - jv - kn - kk - ky - ko - la - lv - lt - roa - nds - lm - mk...
[ -0.011019791476428509, -0.007608421146869659, -0.013485545292496681, 0.056858666241168976, 0.02982271835207939, 0.025902921333909035, 0.0014453696785494685, -0.02334984950721264, -0.05866226181387901, 0.04729994386434555, 0.0004893152508884668, -0.044792525470256805, 0.009781910106539726, ...
distilbert-base-uncased-distilled-squad
[ "pytorch", "tf", "tflite", "coreml", "safetensors", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
100,097
2019-08-28T12:06:26Z
--- language: en datasets: - squad widget: - text: "Which name is also used to describe the Amazon rainforest in English?" context: "The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also know...
[ -0.014815470203757286, -0.037721142172813416, -0.02040264755487442, 0.036371149122714996, 0.050523530691862106, 0.026605786755681038, 0.015834469348192215, 0.002761497860774398, -0.03325972333550453, 0.06029042601585388, 0.02222844399511814, -0.02047206088900566, 0.0234863068908453, 0.0495...
gpt2-large
[ "pytorch", "tf", "jax", "rust", "safetensors", "gpt2", "text-generation", "en", "arxiv:1910.09700", "transformers", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
1,454,819
2019-08-21T00:28:36Z
--- language: en license: mit --- # GPT-2 Large ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Envir...
[ -0.019210295751690865, -0.01626688428223133, -0.0069940462708473206, 0.04214398190379143, 0.04708635061979294, 0.029937541112303734, 0.009007442742586136, -0.028868699446320534, -0.01715780980885029, 0.05393955111503601, 0.02747075818479061, -0.014892312698066235, 0.004411180969327688, 0.0...
gpt2-xl
[ "pytorch", "tf", "jax", "rust", "gpt2", "text-generation", "en", "arxiv:1910.09700", "transformers", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
308,781
2019-11-05T17:51:20Z
--- language: en license: mit --- # GPT-2 XL ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Environm...
[ -0.020884642377495766, -0.015609662979841232, -0.0020949163008481264, 0.03980805724859238, 0.04831736534833908, 0.03866703808307648, 0.011711533181369305, -0.02444232441484928, -0.017004122957587242, 0.05376949906349182, 0.022963527590036392, -0.019910424947738647, -0.00021067328634671867, ...
AIDA-UPM/bertweet-base-multi-mami
[ "pytorch", "roberta", "text-classification", "en", "transformers", "misogyny", "license:apache-2.0" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
41
null
--- pipeline_tag: text-classification tags: - text-classification - misogyny language: en license: apache-2.0 widget: - text: "Women wear yoga pants because men don't stare at their personality" example_title: "Misogyny detection" --- # bertweet-base-multi-mami This is a Bertweet model: It maps sentences & paragraph...
[ -0.010873628780245781, -0.008632580749690533, 0.013887869194149971, 0.03233775496482849, 0.06488702446222305, 0.030270477756857872, 0.006801179610192776, 0.009612026624381542, -0.008260836824774742, 0.045600686222314835, 0.035277217626571655, 0.0051359133794903755, 0.04148901253938675, 0.0...
AIDA-UPM/mstsb-paraphrase-multilingual-mpnet-base-v2
[ "pytorch", "xlm-roberta", "feature-extraction", "multilingual", "transformers", "sentence-similarity" ]
sentence-similarity
{ "architectures": [ "XLMRobertaModel" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngr...
1,084
2021-07-13T10:48:12Z
--- pipeline_tag: sentence-similarity language: "multilingual" tags: - feature-extraction - sentence-similarity - transformers - multilingual --- # mstsb-paraphrase-multilingual-mpnet-base-v2 This is a fine-tuned version of `paraphrase-multilingual-mpnet-base-v2` from [sentence-transformers](https://www.SBERT.net) mo...
[ -0.021330412477254868, -0.026658739894628525, -0.020646430552005768, 0.0674527958035469, 0.043967727571725845, 0.03708125278353691, -0.006465953309088945, 0.016840199008584023, -0.07092086970806122, 0.07607365399599075, 0.025544045493006706, 0.003748631803318858, 0.00506663927808404, 0.034...
ARTeLab/mbart-summarization-mlsum
[ "pytorch", "mbart", "text2text-generation", "it", "dataset:ARTeLab/mlsum-it", "transformers", "summarization", "autotrain_compatible", "has_space" ]
summarization
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
111
2021-12-25T07:39:53Z
--- tags: - summarization language: - it metrics: - rouge model-index: - name: summarization_mbart_mlsum results: [] datasets: - ARTeLab/mlsum-it --- # mbart_summarization_mlsum This model is a fine-tuned version of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on mlsum-it for Abstra...
[ -0.010495769791305065, -0.02107301540672779, -0.02242184616625309, 0.05249032750725746, 0.029602646827697754, 0.014708726666867733, -0.03193940967321396, -0.013051177375018597, -0.027481524273753166, 0.0649409070611, 0.05891687422990799, -0.013877293094992638, -0.005297796335071325, 0.0364...
AdapterHub/bert-base-uncased-pf-wikihop
[ "bert", "en", "arxiv:2104.08247", "adapter-transformers", "question-answering", "adapterhub:qa/wikihop" ]
question-answering
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_bea...
4
2021-08-31T13:42:52Z
--- tags: - question-answering - bert - adapterhub:qa/wikihop - adapter-transformers language: - en --- # Adapter `AdapterHub/bert-base-uncased-pf-wikihop` for bert-base-uncased An [adapter](https://adapterhub.ml) for the `bert-base-uncased` model that was trained on the [qa/wikihop](https://adapterhub.ml/explore/qa/...
[ -0.0055169276893138885, -0.029544973745942116, -0.021199414506554604, 0.054959528148174286, 0.010224147699773312, 0.01899798773229122, -0.015081333927810192, -0.010945849120616913, -0.04886632785201073, 0.042991556227207184, 0.016519546508789062, 0.018832512199878693, -0.009421841241419315, ...
AdapterHub/narrativeqa
[ "bart", "dataset:narrativeqa", "adapter-transformers", "adapterhub:qa/narrativeqa" ]
null
{ "architectures": null, "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_bea...
23
2021-12-14T13:44:06Z
--- tags: - adapterhub:qa/narrativeqa - adapter-transformers - bart datasets: - narrativeqa --- # Adapter `hSterz/narrativeqa` for facebook/bart-base An [adapter](https://adapterhub.ml) for the `facebook/bart-base` model that was trained on the [qa/narrativeqa](https://adapterhub.ml/explore/qa/narrativeqa/) dataset. ...
[ -0.0472346693277359, -0.03241787850856781, -0.010705140419304371, 0.05226442217826843, 0.016803208738565445, 0.034696225076913834, -0.030486775562167168, -0.020149867981672287, -0.04581405594944954, 0.06214888393878937, 0.016938529908657074, -0.008296453393995762, 0.002001091605052352, 0.0...
Aftabhussain/Tomato_Leaf_Classifier
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index", "autotrain_compatible" ]
image-classification
{ "architectures": [ "ViTForImageClassification" ], "model_type": "vit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
50
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: Tomato_Leaf_Classifier results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 1.0 --- # Tomato_Leaf_Classifier A...
[ 0.004152571316808462, -0.0004835593281313777, 0.02311917394399643, 0.022892436012625694, 0.022400328889489174, -0.027861513197422028, -0.029716407880187035, -0.010496901348233223, -0.0023207671474665403, 0.04079240933060646, 0.013874698430299759, 0.01872020587325096, 0.009124122560024261, ...
Ahmad/parsT5-base
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
25
null
A monolingual T5 model for Persian, trained on the OSCAR 21.09 corpus (https://oscar-corpus.com/) with a self-supervised objective. A 35 GB deduplicated version of the Persian data was used for pre-training. It is analogous to the English T5 model, but for Persian only. You may need to fine-tune it on your specific task. Exa...
[ -0.009901105426251888, -0.0392649881541729, 0.01183412317186594, 0.05820661038160324, 0.00806773453950882, 0.021093308925628662, -0.028953509405255318, 0.011932440102100372, -0.031116347759962082, 0.03184978663921356, 0.03056228905916214, -0.009432998485863209, 0.00020639349531847984, 0.04...
AhmedSSoliman/MarianCG-CoNaLa
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
21
null
--- widget: - text: "create array containing the maximum value of respective elements of array `[2, 3, 4]` and array `[1, 5, 2]" - text: "check if all elements in list `mylist` are identical" - text: "enable debug mode on flask application `app`" - text: "getting the length of `my_tuple`" - text: 'find all files in dir...
[ -0.036963313817977905, -0.016969092190265656, 0.00979140680283308, 0.056333914399147034, 0.04587133973836899, 0.019300581887364388, -0.009029842913150787, -0.005184822250157595, -0.0016885449877008796, 0.051510289311409, 0.051042672246694565, -0.008444000035524368, -0.04000517353415489, 0....
AigizK/wav2vec2-large-xls-r-300m-bashkir-cv7_opt
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ba", "dataset:mozilla-foundation/common_voice_7_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "mozilla-foundation/common_voice_7_0", "robust-speech-event", "license:apache-2.0", "model-index", "has_space" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
64
null
--- language: - ba license: apache-2.0 tags: - automatic-speech-recognition - generated_from_trainer - hf-asr-leaderboard - mozilla-foundation/common_voice_7_0 - robust-speech-event datasets: - mozilla-foundation/common_voice_7_0 model-index: - name: wav2vec2-large-xls-r-300m-bashkir-cv7_opt results: - task: ...
[ -0.030909843742847443, -0.009984084405004978, -0.015481088310480118, 0.0359111912548542, 0.051136963069438934, 0.020162273198366165, -0.011135826818645, -0.013878224417567253, -0.03439752757549286, 0.05986606329679489, 0.02261023409664631, -0.028525015339255333, 0.012611466459929943, 0.015...
AimB/mT5-en-kr-natural
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat...
78
null
You can use this model with simpletransformers. ``` !pip install simpletransformers from simpletransformers.t5 import T5Model model = T5Model("mt5", "AimB/mT5-en-kr-natural") print(model.predict(["I feel good today"])) print(model.predict(["우리집 고양이는 세상에서 제일 귀엽습니다"])) ```
[ -0.06004346162080765, -0.016335109248757362, 0.006723075173795223, 0.02890191785991192, 0.0148444389924407, 0.0413275882601738, 0.006608871743083, -0.003512646770104766, -0.04621485620737076, 0.03126444295048714, 0.04113199934363365, 0.00006332839984679595, 0.018116896972060204, 0.02554496...
Ajay191191/autonlp-Test-530014983
[ "pytorch", "bert", "text-classification", "en", "dataset:Ajay191191/autonlp-data-Test", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_rep...
34
null
--- tags: autonlp language: en widget: - text: "I love AutoNLP 🤗" datasets: - Ajay191191/autonlp-data-Test co2_eq_emissions: 55.10196329868386 --- # Model Trained Using AutoNLP - Problem type: Binary Classification - Model ID: 530014983 - CO2 Emissions (in grams): 55.10196329868386 ## Validation Metrics - Loss: 0....
[ -0.0234396792948246, -0.024233898147940636, -0.0071845087222754955, 0.03859707713127136, 0.031028475612401962, 0.012503944337368011, -0.018656479194760323, -0.024432623758912086, -0.03983224183320999, 0.08175364136695862, 0.02413497120141983, 0.018310442566871643, -0.004602005705237389, 0....
Ajaykannan6/autonlp-manthan-16122692
[ "pytorch", "bart", "text2text-generation", "unk", "dataset:Ajaykannan6/autonlp-data-manthan", "transformers", "autonlp", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "BartForConditionalGeneration" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 142, "min_length": 56, "no_repeat_ngr...
4
null
--- tags: autonlp language: unk widget: - text: "I love AutoNLP 🤗" datasets: - Ajaykannan6/autonlp-data-manthan --- # Model Trained Using AutoNLP - Problem type: Summarization - Model ID: 16122692 ## Validation Metrics - Loss: 1.1877621412277222 - Rouge1: 42.0713 - Rouge2: 23.3043 - RougeL: 37.3755 - RougeLsum: 37...
[ -0.03189496695995331, -0.01676969602704048, 0.002092089969664812, 0.048906125128269196, 0.019152024760842323, 0.006377926096320152, -0.02794368751347065, -0.035520344972610474, -0.021031726151704788, 0.06908029317855835, 0.0329279899597168, 0.006093441508710384, 0.01684638112783432, 0.0281...
Akari/albert-base-v2-finetuned-squad
[ "pytorch", "tensorboard", "albert", "question-answering", "dataset:squad_v2", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repe...
13
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_v2 model-index: - name: albert-base-v2-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this...
[ -0.029572684317827225, -0.020743416622281075, -0.016498945653438568, 0.04478905722498894, 0.04452267661690712, 0.006870929151773453, -0.026607204228639603, 0.013551851734519005, -0.029215577989816666, 0.04142707586288452, 0.04274341091513634, -0.015718000009655952, 0.006309848744422197, 0....
Akash7897/bert-base-cased-wikitext2
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
8
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-base-cased-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base...
[ -0.017138328403234482, -0.01508724968880415, -0.0287577286362648, 0.04021750018000603, 0.03171816095709801, 0.013152047991752625, -0.010431719943881035, -0.021625515073537827, -0.04631798341870308, 0.06324805319309235, 0.007839635014533997, -0.021449975669384003, 0.01795564405620098, 0.042...
Akash7897/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
31
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: cola met...
[ -0.016847942024469376, 0.012150720693171024, -0.01897217333316803, 0.04333428293466568, 0.06852603703737259, 0.023040657863020897, -0.0285385400056839, -0.026320114731788635, -0.04608210176229477, 0.05947006493806839, 0.034364018589258194, -0.011909706518054008, 0.021749870851635933, 0.033...
Akash7897/distilbert-base-uncased-finetuned-sst2
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
31
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-sst2 results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: sst2 metrics: - ...
[ -0.017538683488965034, 0.0022388496436178684, -0.029487038031220436, 0.04940084367990494, 0.07627291232347488, 0.03173481673002243, -0.008243282325565815, -0.026529349386692047, -0.05034610629081726, 0.07153264433145523, 0.019475091248750687, -0.013240296393632889, 0.016702184453606606, 0....
Akash7897/gpt2-wikitext2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
5
null
--- license: mit tags: - generated_from_trainer model-index: - name: gpt2-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2-wikitext2 This model ...
[ -0.020038191229104996, -0.021166520193219185, -0.013148708269000053, 0.032234739512205124, 0.02591344341635704, 0.018762804567813873, -0.006228595972061157, 0.002977769821882248, -0.041567280888557434, 0.05789986997842789, 0.016291866078972816, -0.020257001742720604, 0.016534266993403435, ...
Akashpb13/Swahili_xlsr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "sw", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "model_for_talk", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
10
2022-01-30T05:50:47Z
--- language: - sw license: apache-2.0 tags: - automatic-speech-recognition - generated_from_trainer - hf-asr-leaderboard - model_for_talk - mozilla-foundation/common_voice_8_0 - robust-speech-event - sw datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: Akashpb13/Swahili_xlsr results: - task: ...
[ -0.031882546842098236, -0.014554023742675781, -0.023722512647509575, 0.031780537217855453, 0.0516783744096756, 0.03899982199072838, -0.026388373225927353, -0.012011697515845299, -0.02731936052441597, 0.06612816452980042, 0.034311000257730484, -0.027303773909807205, 0.005662569310516119, 0....
Akashpb13/xlsr_hungarian_new
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "hu", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "model_for_talk", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
7
null
--- language: - hu license: apache-2.0 tags: - automatic-speech-recognition - generated_from_trainer - hf-asr-leaderboard - hu - model_for_talk - mozilla-foundation/common_voice_8_0 - robust-speech-event datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: Akashpb13/xlsr_hungarian_new results: - tas...
[ -0.011181526817381382, -0.02494693547487259, -0.03199692443013191, 0.036355748772621155, 0.042448826134204865, 0.03664779290556908, -0.013177667744457722, -0.007614721078425646, -0.035155896097421646, 0.05814102664589882, 0.028566565364599228, -0.018567267805337906, 0.013594227842986584, 0...
Akashpb13/xlsr_kurmanji_kurdish
[ "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "kmr", "ku", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-...
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
10
2022-01-29T13:25:14Z
--- language: - kmr - ku license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_8_0 - generated_from_trainer - kmr - robust-speech-event - model_for_talk - hf-asr-leaderboard datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: Akashpb13/xlsr_kurmanji_kurdish result...
[ -0.017029309645295143, -0.016133397817611694, -0.020054088905453682, 0.043810706585645676, 0.05538635328412056, 0.028422478586435318, -0.019076960161328316, -0.015273897908627987, -0.03868124634027481, 0.06927556544542313, 0.030099056661128998, -0.037190910428762436, -0.000940995872952044, ...
Akashpb13/xlsr_maltese_wav2vec2
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "mt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
8
null
--- language: mt datasets: - common_voice tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Maltese by Akash PB results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common...
[ -0.024652456864714622, -0.022942891344428062, -0.019556356593966484, 0.043677158653736115, 0.0559670552611351, 0.04088230058550835, -0.01576230116188526, -0.005111028905957937, -0.02309916540980339, 0.07648169249296188, 0.027662048116326332, -0.03434057906270027, -0.006564888637512922, 0.0...
Akjder/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
8
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
[ -0.029324334114789963, 0.006045046728104353, 0.013366674073040485, 0.034415628761053085, 0.0064101917669177055, 0.0184163898229599, 0.0027549832593649626, 0.015343309380114079, -0.019336814060807228, 0.01679833233356476, 0.02836332842707634, -0.0335305817425251, 0.010642284527420998, 0.035...
AkshatSurolia/BEiT-FaceMask-Finetuned
[ "pytorch", "beit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "BeitForImageClassification" ], "model_type": "beit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat...
239
null
--- license: apache-2.0 tags: - image-classification datasets: - Face-Mask18K --- # BEiT for Face Mask Detection BEiT model pre-trained and fine-tuned on the self-curated custom Face-Mask18K dataset (18k images, 2 classes) at resolution 224x224. It was introduced in the paper BEiT: BERT Pre-Training of Image ...
[ -0.020677804946899414, -0.017275972291827202, 0.0066489712335169315, 0.028485625982284546, 0.04120609536767006, 0.0074081504717469215, -0.000487236597109586, -0.0013661521952599287, 0.005351048894226551, 0.0399339459836483, 0.0002983034064527601, 0.0007545743719674647, 0.00821635127067566, ...
AkshatSurolia/DeiT-FaceMask-Finetuned
[ "pytorch", "deit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "DeiTForImageClassification" ], "model_type": "deit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat...
46
null
--- license: apache-2.0 tags: - image-classification datasets: - Face-Mask18K --- # Distilled Data-efficient Image Transformer for Face Mask Detection Distilled data-efficient Image Transformer (DeiT) model pre-trained and fine-tuned on the self-curated custom Face-Mask18K dataset (18k images, 2 classes) at r...
[ -0.027245115488767624, -0.024786649271845818, -0.0009485746268182993, 0.017941346392035484, 0.03432236611843109, 0.013580622151494026, -0.011854017153382301, -0.0020672131795436144, 0.0036551556549966335, 0.06592775136232376, 0.025036662817001343, 0.001007674029096961, 0.020014138892292976, ...
AkshatSurolia/ICD-10-Code-Prediction
[ "pytorch", "bert", "transformers", "text-classification", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_bea...
994
null
--- license: apache-2.0 tags: - text-classification --- # Clinical BERT for ICD-10 Prediction The Publicly Available Clinical BERT Embeddings paper contains four unique clinicalBERT models: initialized with BERT-Base (cased_L-12_H-768_A-12) or BioBERT (BioBERT-Base v1.0 + PubMed 200K + PMC 270K) & trained on either a...
[ -0.00464306166395545, -0.02549247443675995, -0.0061746928840875626, 0.028125977143645287, 0.01803267002105713, 0.018246950581669807, -0.029822323471307755, -0.020784549415111542, -0.012944869697093964, 0.03910352662205696, 0.05487530678510666, -0.010672912932932377, -0.0035439468920230865, ...
AkshatSurolia/ViT-FaceMask-Finetuned
[ "pytorch", "safetensors", "vit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "ViTForImageClassification" ], "model_type": "vit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_n...
40
null
--- license: apache-2.0 tags: - image-classification datasets: - Face-Mask18K --- # Vision Transformer (ViT) for Face Mask Detection Vision Transformer (ViT) model pre-trained and fine-tuned on the self-curated custom Face-Mask18K dataset (18k images, 2 classes) at resolution 224x224. It was first introduced ...
[ -0.028824718669056892, -0.013043593615293503, 0.010582017712295055, 0.018788767978549004, 0.042260631918907166, 0.013768422417342663, -0.009769818745553493, -0.00941441860049963, -0.006998524535447359, 0.05365334078669548, 0.021737249568104744, -0.004361131228506565, 0.01004294864833355, 0...
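A minimal inference sketch for the three AkshatSurolia face-mask classifiers above (ViT shown; the BEiT and DeiT checkpoints load the same way). The image path and the use of the generic auto classes are assumptions, not taken from the cards:

```python
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_id = "AkshatSurolia/ViT-FaceMask-Finetuned"
processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("face.jpg").convert("RGB")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
# Map the top logit back to its class name via the model config
print(model.config.id2label[logits.argmax(-1).item()])
```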
AlekseyKulnevich/Pegasus-HeaderGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "n...
8
2022-01-01T16:15:27Z
**Usage with Hugging Face Transformers for the header generation task** ``` from transformers import AutoTokenizer, AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("AlekseyKulnevich/Pegasus-HeaderGeneration") tokenizer = AutoTokenizer.from_pretrained('google/pegasus-large') input_text # your text input_ ...
[ -0.01804734766483307, -0.03409673646092415, -0.0026001296937465668, 0.040262266993522644, 0.04582793265581131, 0.017161739990115166, -0.025579148903489113, -0.04025344178080559, -0.03198616951704025, 0.06228310242295265, 0.0015479626599699259, 0.008950861170887947, 0.01828843355178833, 0.0...
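A runnable sketch of the header-generation usage above; the placeholder text and decoding parameters are illustrative assumptions, not values from the card:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("AlekseyKulnevich/Pegasus-HeaderGeneration")
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")  # tokenizer named in the card

input_text = "Your document text goes here."  # hypothetical input
input_ids = tokenizer(input_text, return_tensors="pt", truncation=True).input_ids
output_ids = model.generate(input_ids, num_beams=5, max_length=32, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```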
AlekseyKulnevich/Pegasus-QuestionGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "n...
17
null
**Usage with Hugging Face Transformers for the question generation task** ``` from transformers import AutoTokenizer, AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("AlekseyKulnevich/Pegasus-QuestionGeneration") tokenizer = AutoTokenizer.from_pretrained('google/pegasus-large') input_text # your text inp...
[ 0.007300873752683401, -0.02193848229944706, -0.010622253641486168, 0.04952740669250488, 0.03560711815953255, 0.013048920780420303, -0.009913694113492966, -0.01903858222067356, -0.027504954487085342, 0.038956232368946075, 0.014056339859962463, 0.017486203461885452, 0.005636075511574745, 0.0...
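The question-generation checkpoint loads the same way; a sketch that samples several candidate questions (the sampling parameters are assumptions):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("AlekseyKulnevich/Pegasus-QuestionGeneration")
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")

input_ids = tokenizer("Your passage goes here.", return_tensors="pt", truncation=True).input_ids
output_ids = model.generate(input_ids, do_sample=True, top_p=0.95,
                            num_return_sequences=3, max_length=64)
for ids in output_ids:
    print(tokenizer.decode(ids, skip_special_tokens=True))
```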
AlexN/xls-r-300m-fr-0
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
4
null
--- language: - fr license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_8_0 - generated_from_trainer - robust-speech-event - hf-asr-leaderboard datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: xls-r-300m-fr results: - task: name: Speech Recognition ...
[ -0.02094249799847603, -0.009222322143614292, -0.02397555112838745, 0.030666448175907135, 0.044284574687480927, 0.03192916139960289, -0.02757592685520649, -0.019710270687937737, -0.03430653735995293, 0.060805462300777435, 0.034385405480861664, -0.027825545519590378, 0.00875111110508442, 0.0...
AlexN/xls-r-300m-fr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
17
null
--- language: - fr tags: - automatic-speech-recognition - generated_from_trainer - hf-asr-leaderboard - mozilla-foundation/common_voice_8_0 - robust-speech-event datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: xls-r-300m-fr results: - task: name: Speech Recognition type: automatic-s...
[ -0.01637325994670391, -0.011713245883584023, -0.025164050981402397, 0.030176594853401184, 0.040496423840522766, 0.03378641977906227, -0.028547901660203934, -0.019923964515328407, -0.03421279042959213, 0.05733998864889145, 0.040066979825496674, -0.021452704444527626, 0.010397096164524555, 0...
Andrija/SRoBERTa-L
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:srwac", "dataset:leipzig", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngra...
58
null
--- datasets: - oscar - srwac - leipzig language: - hr - sr - multilingual tags: - masked-lm widget: - text: "Ovo je početak <mask>." license: apache-2.0 --- # Transformer language model for Croatian and Serbian Trained for two epochs (500k steps) on 6 GB of datasets containing Croatian and Serbian text. Leipzi...
[ 0.009129156358540058, -0.028247032314538956, 0.0010668413015082479, 0.05003666505217552, 0.05036885291337967, 0.008799825794994831, -0.014335207641124725, -0.0058690994046628475, -0.05726508051156998, 0.08277229219675064, 0.02404298260807991, -0.033987753093242645, -0.011930692940950394, 0...
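A fill-mask sketch for SRoBERTa-L using the widget text from the card:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="Andrija/SRoBERTa-L")
for pred in fill_mask("Ovo je početak <mask>."):  # widget example from the card
    print(f'{pred["token_str"]}  {pred["score"]:.3f}')
```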
Andrija/SRoBERTa-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_...
7
null
--- datasets: - hr500k language: - hr - sr - multilingual widget: - text: "Moje ime je Aleksandar i zivim u Beogradu pored Vlade Republike Srbije" license: apache-2.0 --- Named Entity Recognition (Token Classification Head) for the Serbian and Croatian languages. Abbreviation|Description -|- O|Outside of a named entity B...
[ -0.000006347585440380499, -0.0056273601949214935, -0.004387859255075455, 0.024481110274791718, 0.06991644203662872, 0.023339910432696342, -0.009151143953204155, 0.01524671446532011, -0.04564926028251648, 0.059377413243055344, 0.024966053664684296, -0.007056138012558222, 0.018305756151676178,...
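A token-classification sketch for SRoBERTa-NER using the widget sentence from the card; the aggregation strategy is an assumption:

```python
from transformers import pipeline

ner = pipeline("token-classification", model="Andrija/SRoBERTa-NER",
               aggregation_strategy="simple")
for entity in ner("Moje ime je Aleksandar i zivim u Beogradu pored Vlade Republike Srbije"):
    print(entity["word"], entity["entity_group"], round(float(entity["score"]), 3))
```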
Anonymous/ReasonBERT-BERT
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": nul...
5
null
Pre-trained for better reasoning ability; try this if you are working on tasks like QA. For more details please see https://openreview.net/forum?id=cGB7CMFtrSx It is based on the bert-base-uncased model and pre-trained for text input.
[ -0.006859705783426762, 0.00883505679666996, -0.021434515714645386, 0.04664171114563942, 0.019348595291376114, 0.01925700344145298, -0.020740875974297523, 0.005885523743927479, -0.03420328348875046, 0.010323778726160526, 0.0032002802472561598, -0.016938989982008934, 0.00047281032311730087, ...
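Since ReasonBERT-BERT is exposed as a feature extractor, a minimal sketch of pulling contextual embeddings (the example sentence is hypothetical):

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("Anonymous/ReasonBERT-BERT")
model = AutoModel.from_pretrained("Anonymous/ReasonBERT-BERT")

inputs = tokenizer("Which team won the 2018 final?", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768) token embeddings
print(hidden.shape)
```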
Aron/distilbert-base-uncased-finetuned-emotion
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
36
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default...
[ -0.009498849511146545, 0.009653325192630291, -0.028936166316270828, 0.03763705864548683, 0.06053311377763748, 0.03326282650232315, -0.024181917309761047, -0.03544573858380318, -0.03370451554656029, 0.055523116141557693, 0.018917378038167953, -0.04675398766994476, 0.035176169127225876, 0.04...
Aruden/DialoGPT-medium-harrypotterall
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
6
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
[ -0.029324334114789963, 0.006045046728104353, 0.013366674073040485, 0.034415628761053085, 0.0064101917669177055, 0.0184163898229599, 0.0027549832593649626, 0.015343309380114079, -0.019336814060807228, 0.01679833233356476, 0.02836332842707634, -0.0335305817425251, 0.010642284527420998, 0.035...
AryanLala/autonlp-Scientific_Title_Generator-34558227
[ "pytorch", "pegasus", "text2text-generation", "en", "dataset:AryanLala/autonlp-data-Scientific_Title_Generator", "transformers", "autonlp", "co2_eq_emissions", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "n...
103
null
--- tags: autonlp language: en widget: - text: "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standard...
[ -0.015695618465542793, -0.012349703349173069, 0.004788646940141916, 0.04972020909190178, 0.04614366590976715, -0.003312081331387162, -0.02014695294201374, -0.03835402801632881, -0.02132038213312626, 0.06137670949101448, 0.02366006001830101, 0.031071458011865616, 0.035528842359781265, 0.045...
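A sketch of calling the title generator above; AutoNLP seq2seq checkpoints generally load through the standard auto classes, which is assumed here:

```python
from transformers import pipeline

titler = pipeline("text2text-generation",
                  model="AryanLala/autonlp-Scientific_Title_Generator-34558227")
# Abstract sentence taken from the card's widget text
abstract = "Datasets is a community library for contemporary NLP designed to support this ecosystem."
print(titler(abstract, max_length=32)[0]["generated_text"])
```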
Ashkanmh/bert-base-parsbert-uncased-finetuned
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
3
null
--- tags: - generated_from_trainer model-index: - name: bert-base-parsbert-uncased-finetuned results: - task: name: Masked Language Modeling type: fill-mask --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread an...
[ -0.01987166330218315, -0.013812064193189144, -0.02104773372411728, 0.052686627954244614, 0.04412341117858887, 0.024253234267234802, -0.009775509126484394, -0.014571042731404305, -0.030795209109783173, 0.06819160282611847, 0.015369975008070469, -0.01877770945429802, 0.011338389478623867, 0....
Atampy26/GPT-Glacier
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram...
5
null
GPT-Glacier, a GPT-Neo 125M model finetuned on the Glacier2 Modding Discord server.
[ -0.018713049590587616, -0.0032266827765852213, -0.006518980022519827, 0.015918362885713577, 0.07091409713029861, 0.01734098605811596, 0.04068811982870102, 0.01002254243940115, -0.025752825662493706, -0.0008721005287952721, 0.04079768434166908, -0.012509702704846859, 0.04385343939065933, 0....
Atchuth/DialoGPT-small-MichaelBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
6
2022-02-12T08:07:29Z
--- tags: - conversational --- # Michael Scott DialoGPT Model
[ -0.03385935351252556, 0.026552503928542137, 0.00015034624084364623, 0.01872318424284458, 0.010290856473147869, 0.03149537369608879, -0.005848324857652187, 0.039833396673202515, -0.005112145096063614, 0.02234308421611786, 0.042588744312524796, -0.033236000686883926, 0.023352084681391716, 0....
Aurora/asdawd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
null
https://www.geogebra.org/m/bbuczchu https://www.geogebra.org/m/xwyasqje https://www.geogebra.org/m/mx2cqkwr https://www.geogebra.org/m/tkqqqthm https://www.geogebra.org/m/asdaf9mj https://www.geogebra.org/m/ywuaj7p5 https://www.geogebra.org/m/jkfkayj3 https://www.geogebra.org/m/hptnn7ar https://www.geogebra.org/m/de9cw...
[ 0.01000450924038887, -0.029606236144900322, -0.0194969791918993, 0.033842477947473526, 0.01758449152112007, 0.01426932867616415, 0.018629582598805428, 0.013413336127996445, -0.058955561369657516, 0.051665060222148895, 0.017157668247818947, -0.021504120901226997, -0.015280550345778465, 0.04...
Ayham/albert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
9
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: albert_distilgpt2_summarization_cnn_dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this...
[ -0.027727466076612473, -0.014908277429640293, -0.030296094715595245, 0.05311718210577965, 0.03613452613353729, 0.019335538148880005, -0.004624738357961178, -0.029071422293782234, -0.04108477756381035, 0.06502967327833176, 0.053155627101659775, 0.00012075075937900692, 0.0025256574153900146, ...
Ayham/albert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
6
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: albert_large_gpt2_summarization_cnndm results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment...
[ -0.023604728281497955, -0.006189126055687666, -0.02070355974137783, 0.059805817902088165, 0.0391814298927784, 0.00014794316666666418, -0.001427522744052112, -0.037180304527282715, -0.033805862069129944, 0.05761459842324257, 0.04959709569811821, 0.0006475832196883857, 0.00882620271295309, 0...
Ayham/distilbert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
8
null
--- tags: - generated_from_trainer datasets: - xsum model-index: - name: distilbert_gpt2_summarization_xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dis...
[ -0.012397421523928642, -0.005650687962770462, -0.025247860699892044, 0.04433070123195648, 0.03998791053891182, 0.030337588861584663, -0.012200996279716492, -0.025289960205554962, -0.04090764373540878, 0.05847376212477684, 0.0467989444732666, -0.006149031221866608, 0.0015813957434147596, 0....
Ayham/ernie_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
13
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: ernie_gpt2_summarization_cnn_dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this commen...
[ -0.025416741147637367, -0.007336568087339401, -0.01733085699379444, 0.04508119076490402, 0.03886226564645767, 0.019939720630645752, -0.0062176804058253765, -0.027837257832288742, -0.04385002329945564, 0.06073789298534393, 0.04108747839927673, -0.005043423734605312, 0.015578207559883595, 0....
Ayham/roberta_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
12
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: roberta_bert_summarization_cnn_dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comm...
[ -0.02529999427497387, -0.007166489493101835, -0.025497978553175926, 0.04488505423069, 0.035269249230623245, 0.0263226255774498, -0.01878948323428631, -0.03301657736301422, -0.04705727472901344, 0.06412534415721893, 0.04698002338409424, -0.0063147032633423805, 0.015623193234205246, 0.052286...
Ayham/roberta_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
4
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: roberta_distilgpt2_summarization_cnn_dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove thi...
[ -0.025774048641324043, -0.012613351456820965, -0.019462333992123604, 0.04021063819527626, 0.034207940101623535, 0.030683394521474838, -0.010204577818512917, -0.025553373619914055, -0.04322012886404991, 0.06214778125286102, 0.04828183352947235, -0.005087254103273153, 0.00480163749307394, 0....
Ayham/roberta_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
6
null
--- tags: - generated_from_trainer datasets: - xsum model-index: - name: roberta_gpt2_summarization_xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robert...
[ -0.01818983629345894, -0.006584119517356157, -0.00879990216344595, 0.04007445648312569, 0.024731583893299103, 0.030810700729489326, -0.007657925598323345, -0.018006592988967896, -0.04315405339002609, 0.05112875998020172, 0.04723035916686058, -0.009053993038833141, 0.004068800248205662, 0.0...
Ayham/xlnet_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
7
null
--- tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: xlnet_bert_summarization_cnn_dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this commen...
[ -0.024525148794054985, -0.0022169260773807764, -0.031006136909127235, 0.03920414671301842, 0.0333976075053215, 0.02873683162033558, -0.02142714336514473, -0.033122718334198, -0.03355025500059128, 0.061281632632017136, 0.04246876761317253, -0.0070390901528298855, 0.016509229317307472, 0.043...
Ayran/DialoGPT-medium-harry-potter-1-through-4-plus-6-e18
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
12
null
--- tags: - conversational --- # DialoGPT medium model (based on Harry Potter 1 through 4 plus 6, 18 epochs)
[ -0.0365067794919014, 0.01275510061532259, 0.0035163320135325193, 0.02053210325539112, 0.018720299005508423, 0.02433137036859989, -0.004047156777232885, 0.020871173590421677, -0.014467047527432442, 0.02176743559539318, 0.03861569985747337, -0.02917957492172718, 0.010484030470252037, 0.03530...
Ayran/DialoGPT-small-gandalf
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
11
null
--- tags: - conversational --- # Gandalf DialoGPT Model
[ -0.017163848504424095, 0.0021795600187033415, 0.011440247297286987, 0.01132178120315075, 0.026106007397174835, 0.035861168056726456, -0.004572286736220121, 0.03751932084560394, -0.01953050307929516, 0.015187175944447517, 0.04805498570203781, -0.036674171686172485, 0.01627880334854126, 0.02...
AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2
[ "pytorch", "roberta", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
8
null
--- tags: - generated_from_trainer model-index: - name: ai-club-inductions-21-nlp-roBERTa-base-squad-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ai-club...
[ -0.03455235809087753, -0.022666610777378082, -0.011897986754775047, 0.030924290418624878, 0.040529459714889526, 0.00833556242287159, -0.01581529527902603, 0.011384881101548672, -0.029494309797883034, 0.038732077926397324, 0.03264516219496727, -0.009810901246964931, -0.012956755235791206, 0...
AyushPJ/ai-club-inductions-21-nlp-roBERTa
[ "pytorch", "roberta", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_re...
8
null
--- tags: - generated_from_trainer model-index: - name: ai-club-inductions-21-nlp-roBERTa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ai-club-inductions-21...
[ -0.02864033170044422, -0.014081289060413837, -0.009880959056317806, 0.03289592266082764, 0.04152611270546913, 0.003809188725426793, -0.011745953932404518, -0.0040899706073105335, -0.035392556339502335, 0.05008569732308388, 0.026693614199757576, -0.010325491428375244, -0.014318493194878101, ...
BSC-LT/roberta-large-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_...
5
null
--- language: - es license: apache-2.0 tags: - "national library of spain" - "spanish" - "bne" - "capitel" - "ner" datasets: - "bne" - "capitel" metrics: - "f1" --- **⚠️NOTICE⚠️: THIS MODEL HAS BEEN MOVED TO THE FOLLOWING URL AND WILL SOON BE REMOVED:** https://huggingface.co/PlanTL-GOB-ES/roberta-large-bne-capitel...
[ -0.013140302151441574, 0.008949889801442623, 0.005648141261190176, 0.0642058253288269, 0.03996429964900017, 0.011962760239839554, -0.02005387842655182, -0.01629311591386795, -0.028922246769070625, 0.04286734387278557, 0.008388450369238853, -0.016061918810009956, -0.019138608127832413, 0.05...
BSen/wav2vec2-base-timit-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
4
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2...
[ -0.0324067622423172, -0.012858448550105095, -0.019763117656111717, 0.023773204535245895, 0.038853537291288376, 0.02334434539079666, 0.00405894685536623, 0.003960238769650459, -0.033092349767684937, 0.047073546797037125, 0.03590237349271774, -0.018723880872130394, -0.0023193489760160446, 0....
Babelscape/rebel-large
[ "pytorch", "safetensors", "bart", "text2text-generation", "en", "dataset:Babelscape/rebel-dataset", "transformers", "seq2seq", "relation-extraction", "license:cc-by-nc-sa-4.0", "model-index", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "BartForConditionalGeneration" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repe...
9,458
null
--- language: - en widget: - text: "Punta Cana is a resort town in the municipality of Higuey, in La Altagracia Province, the eastern most province of the Dominican Republic" tags: - seq2seq - relation-extraction datasets: - Babelscape/rebel-dataset model-index: - name: REBEL results: - task: name: Relation E...
[ -0.010500592179596424, -0.03242766112089157, -0.013916089199483395, 0.04477895051240921, 0.054918136447668076, 0.02090439945459366, -0.01986108534038067, 0.010226966813206673, -0.04634961858391762, 0.04872405156493187, -0.008534769527614117, -0.0161061342805624, 0.012008367106318474, 0.017...
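A triplet-extraction sketch for REBEL using the widget sentence; decoding without skipping special tokens keeps the <triplet>/<subj>/<obj> markers that the card's own post-processing parses into (head, type, tail) triplets. The pipeline call style is an assumption:

```python
from transformers import pipeline

extractor = pipeline("text2text-generation", model="Babelscape/rebel-large")
sentence = ("Punta Cana is a resort town in the municipality of Higuey, in La Altagracia "
            "Province, the eastern most province of the Dominican Republic")
out = extractor(sentence, return_tensors=True, return_text=False)
raw = extractor.tokenizer.batch_decode([out[0]["generated_token_ids"]])[0]
print(raw)  # e.g. "<s><triplet> Punta Cana <subj> ... <obj> ..." to be parsed into triplets
```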
Babysittingyoda/DialoGPT-small-familyguy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
13
null
--- tags: - conversational --- # A Peter DialoGPT Model
[ -0.036111779510974884, 0.03164803609251976, 0.015021933242678642, 0.01589340716600418, 0.015772666782140732, 0.017661171033978462, 0.0038279264699667692, 0.0286259762942791, -0.014450727961957455, 0.017371106892824173, 0.03454577550292015, -0.03121502697467804, 0.00861534383147955, 0.03184...
Bagus/wav2vec2-large-xlsr-bahasa-indonesia
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "el", "dataset:common_voice_id_6.1", "transformers", "audio", "speech", "bahasa-indonesia", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
12
null
--- language: id datasets: - common_voice_id_6.1 tags: - audio - automatic-speech-recognition - speech - bahasa-indonesia license: apache-2.0 --- Dataset used for training: - Name: Common Voice - Language: Indonesian [id] - Version: 6.1 Test WER: 19.3 % Contact: bagus@ep.its.ac.id
[ -0.033216796815395355, -0.014584439806640148, -0.02048506960272789, 0.017797116190195084, 0.06048831716179848, 0.016769198700785637, 0.0005904000718146563, -0.01211559772491455, -0.0044015138410031796, 0.055244963616132736, 0.01902417466044426, -0.03392527252435684, 0.020525313913822174, 0...
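A transcription sketch for the Indonesian XLSR checkpoint above; the audio file and resampling step are assumptions (Common Voice models expect 16 kHz input):

```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "Bagus/wav2vec2-large-xlsr-bahasa-indonesia"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

speech, rate = torchaudio.load("sample.wav")  # hypothetical recording
speech = torchaudio.functional.resample(speech, rate, 16_000).squeeze()
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])
```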
Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition
[ "pytorch", "wav2vec2", "audio-classification", "ja", "dataset:jtes", "transformers", "audio", "speech", "speech-emotion-recognition", "has_space" ]
audio-classification
{ "architectures": [ "HubertForSequenceClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "...
26
null
--- language: ja datasets: - jtes tags: - audio - audio-classification - speech - speech-emotion-recognition --- This is for (private) DEMO only.
[ -0.04115305840969086, -0.01297744270414114, -0.0016235909424722195, 0.02663354203104973, 0.0676797553896904, 0.024844540283083916, 0.013219201937317848, -0.015505552291870117, -0.03526631370186806, 0.05942654609680176, 0.0187111534178257, -0.04917192831635475, 0.024974865838885307, 0.05422...
BaptisteDoyen/camembert-base-xnli
[ "pytorch", "tf", "camembert", "text-classification", "fr", "dataset:xnli", "transformers", "zero-shot-classification", "xnli", "nli", "license:mit", "has_space" ]
zero-shot-classification
{ "architectures": [ "CamembertForSequenceClassification" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
405,474
2021-03-24T16:43:34Z
--- language: - fr thumbnail: tags: - zero-shot-classification - xnli - nli - fr license: mit pipeline_tag: zero-shot-classification datasets: - xnli metrics: - accuracy --- # camembert-base-xnli ## Model description CamemBERT-base model fine-tuned on the French part of the XNLI dataset. <br> One of the few Zero-Shot c...
[ -0.02432314306497574, -0.014132813550531864, 0.009933752939105034, 0.04409412667155266, 0.039495185017585754, 0.014269725419580936, -0.010511870495975018, -0.004505624063313007, -0.03510694205760956, 0.058593299239873886, 0.0022852891124784946, -0.0061280191875994205, -0.017499880865216255, ...
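A zero-shot classification sketch for camembert-base-xnli; the labels and the French hypothesis template are illustrative:

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="BaptisteDoyen/camembert-base-xnli")
sequence = "L'équipe de France joue aujourd'hui au Parc des Princes"
labels = ["sport", "politique", "science"]
# A French template usually suits a French NLI model better than the English default:
print(classifier(sequence, labels, hypothesis_template="Ce texte parle de {}."))
```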
Batsy24/DialoGPT-medium-Twilight_BellaBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
8
null
--- tags: - conversational --- # Bella Swan DialoGPT model
[ -0.05652390047907829, 0.016988415271043777, 0.013422401621937752, 0.01829984225332737, -0.0032349908724427223, 0.015877991914749146, 0.005312751047313213, 0.029053859412670135, -0.02306198701262474, 0.021632535383105278, 0.03333413973450661, -0.04433472827076912, 0.019134104251861572, 0.03...
Batsy24/DialoGPT-small-Twilight_EdBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
6
2021-08-26T19:47:43Z
--- tags: - conversational --- # Twilight Edward DialoGPT Model
[ -0.031777627766132355, 0.02548781968653202, 0.004613311495631933, 0.020761625841259956, 0.004049879498779774, 0.01645183563232422, 0.0004574389895424247, 0.024089476093649864, -0.030741088092327118, 0.020437531173229218, 0.03451504558324814, -0.03968954086303711, 0.02342875860631466, 0.027...
BatuhanYilmaz/dummy-model
[ "tf", "camembert", "fill-mask", "transformers", "generated_from_keras_callback", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_...
6
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: dummy-model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dummy-model This model is a ...
[ -0.060626763850450516, -0.006557994522154331, 0.0003560581535566598, 0.03280690684914589, 0.027389459311962128, 0.022150754928588867, -0.009804321452975273, 0.005338778719305992, -0.03328477591276169, 0.04638205096125603, 0.01552434079349041, -0.023666584864258766, 0.013863984495401382, 0....
Baybars/wav2vec2-xls-r-1b-turkish
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
13
null
--- language: - tr tags: - automatic-speech-recognition - common_voice - generated_from_trainer datasets: - common_voice model-index: - name: '' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it,...
[ -0.02517840266227722, -0.002014386234804988, -0.005102722905576229, 0.041968587785959244, 0.05318162962794304, 0.022293945774435997, 0.0012194797163829207, -0.013768802396953106, -0.036466121673583984, 0.05553101375699043, 0.012516127899289131, -0.04639177396893501, 0.0100634153932333, 0.0...
Baybars/wav2vec2-xls-r-300m-cv8-turkish
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer", "hf-asr-leaderboard", "robust-speech-event", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_s...
5
null
--- language: - tr license: apache-2.0 tags: - automatic-speech-recognition - common_voice - generated_from_trainer - hf-asr-leaderboard - robust-speech-event - tr datasets: - common_voice model-index: - name: '' results: [] --- <!-- This model card has been generated automatically according to the information the T...
[ -0.0211473498493433, -0.00024660993949510157, -0.024652250111103058, 0.060456231236457825, 0.04609803482890129, 0.03227173909544945, -0.000568919291254133, -0.007305052597075701, -0.044426657259464264, 0.0740593746304512, 0.02549871616065502, -0.03200352564454079, -0.0011801455402746797, 0...
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_s...
1,816
null
# Query Generation This model is the t5-base model from [docTTTTTquery](https://github.com/castorini/docTTTTTquery). The T5-base model was trained on the [MS MARCO Passage Dataset](https://github.com/microsoft/MSMARCO-Passage-Ranking), which consists of about 500k real search queries from Bing together with the releva...
[ 0.006665314547717571, -0.024771330878138542, -0.015181586146354675, 0.07506373524665833, 0.022244613617658615, 0.03187179937958717, -0.020792517811059952, 0.018398651853203773, -0.03758012130856514, 0.03154726326465607, 0.010181964375078678, -0.001217871205881238, -0.01004168763756752, 0.0...
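A query-generation sketch in the docTTTTTquery style described above; the passage and sampling settings are illustrative:

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_id = "BeIR/query-gen-msmarco-t5-base-v1"
tokenizer = T5Tokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

para = "Python is an interpreted, high-level, general-purpose programming language."
input_ids = tokenizer.encode(para, return_tensors="pt")
outputs = model.generate(input_ids, max_length=64, do_sample=True,
                         top_p=0.95, num_return_sequences=3)
for ids in outputs:
    print(tokenizer.decode(ids, skip_special_tokens=True))
```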
BeIR/query-gen-msmarco-t5-large-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_s...
1,225
null
# Query Generation This model is the t5-large model from [docTTTTTquery](https://github.com/castorini/docTTTTTquery). The T5-large model was trained on the [MS MARCO Passage Dataset](https://github.com/microsoft/MSMARCO-Passage-Ranking), which consists of about 500k real search queries from Bing together with the releva...
[ 0.006665314547717571, -0.024771330878138542, -0.015181586146354675, 0.07506373524665833, 0.022244613617658615, 0.03187179937958717, -0.020792517811059952, 0.018398651853203773, -0.03758012130856514, 0.03154726326465607, 0.010181964375078678, -0.001217871205881238, -0.01004168763756752, 0.0...
BeIR/sparta-msmarco-distilbert-base-v1
[ "pytorch", "distilbert", "feature-extraction", "arxiv:2009.13013", "arxiv:2104.08663", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngra...
106
null
# SPARTA Re-Implementation of [SPARTA: Efficient Open-Domain Question Answering via Sparse Transformer Matching Retrieval](https://arxiv.org/abs/2009.13013). It is the re-implementation we used for [BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models](https://arxiv.org/abs/2104.08663...
[ -0.01208326406776905, -0.010308091528713703, -0.025207336992025375, 0.04378091171383858, 0.02080470882356167, 0.016092728823423386, -0.009890414774417877, 0.013769061304628849, -0.05994081124663353, 0.0436457134783268, 0.02226351387798786, 0.011137934401631355, 0.008531155996024609, 0.0259...
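A rough sketch of SPARTA-style scoring under the paper's formulation: query terms are embedded non-contextually, document tokens contextually, and each query term is matched to its best document token with a ReLU + log transform. This is an interpretation of the method, not code from the card:

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_id = "BeIR/sparta-msmarco-distilbert-base-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

doc = "Python was created by Guido van Rossum."
with torch.no_grad():
    doc_emb = model(**tokenizer(doc, return_tensors="pt")).last_hidden_state[0]  # contextual

query_ids = tokenizer("who created python", add_special_tokens=False,
                      return_tensors="pt").input_ids[0]
query_emb = model.get_input_embeddings()(query_ids)  # non-contextual term vectors

# score = sum over query terms of log(1 + relu(max dot product with any doc token))
score = torch.log1p(torch.relu(query_emb @ doc_emb.T).max(dim=-1).values).sum()
print(float(score))
```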
BearThreat/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, ...
30
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: cola met...
[ -0.020428238436579704, 0.008538545109331608, -0.020600613206624985, 0.047663796693086624, 0.06862862408161163, 0.028152596205472946, -0.023160967975854874, -0.02812786214053631, -0.0486995205283165, 0.062389519065618515, 0.04206063970923424, -0.009140994399785995, 0.01765252649784088, 0.03...
Bee-Garbs/DialoGPT-real-cartman-small
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
10
null
--- tags: - conversational --- # South Park Cartman DialoGPT2 small (18 epochs)
[ -0.025591084733605385, 0.0100005604326725, -0.015600182116031647, 0.0002896491205319762, 0.02751355990767479, 0.0014230277156457305, 0.011797082610428333, 0.029994580894708633, -0.024498077109456062, 0.04542490094900131, 0.04041219502687454, -0.01957462541759014, 0.022896887734532356, 0.03...
Begimay/Task
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams...
0
2021-06-26T12:49:19Z
from transformers import GPTNeoForCausalLM, GPT2Tokenizer model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B") tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B") prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \ ... "previously ...
[ -0.03709251061081886, -0.029051978141069412, 0.0015572370029985905, 0.06345819681882858, 0.06844431161880493, 0.038142427802085876, -0.012345516122877598, -0.026248177513480186, -0.033635661005973816, 0.04302563890814781, 0.00004153319605393335, -0.002072308212518692, 0.012227887287735939, ...
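A completed version of the truncated GPT-Neo snippet above (the prompt continuation and generation settings are assumptions):

```python
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

prompt = ("In a shocking finding, scientists discovered a herd of unicorns living in a "
          "remote, previously unexplored valley.")
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
print(tokenizer.batch_decode(gen_tokens)[0])
```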
BenWitter/DialoGPT-small-Tyrion
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size...
11
null
--- tags: - conversational inference: false conversational: true --- First-time chat bot made by following a guide; low epoch count due to limited resources.
[ -0.014878230169415474, -0.005156678147614002, -0.01421799510717392, 0.017912352457642555, 0.030382685363292694, 0.008951956406235695, -0.025558484718203545, 0.012864730320870876, -0.02509419247508049, 0.03223489597439766, 0.0619264617562294, 0.022171752527356148, 0.02070264331996441, 0.060...