Schema (14 columns per record):

| column | type | observed range / values |
| --- | --- | --- |
| modelId | string | 4–112 chars |
| sha | string | 40 chars (fixed) |
| lastModified | string | 24 chars (fixed) |
| tags | list | |
| pipeline_tag | string | 29 distinct values |
| private | bool | 1 distinct value |
| author | string | 2–38 chars |
| config | null | |
| id | string | 4–112 chars |
| downloads | float64 | 0–36.8M |
| likes | float64 | 0–712 |
| library_name | string | 17 distinct values |
| readme | string | 0–186k chars |
| embedding | list | |

Records follow, one per model, as `field: value` lines; the `readme` excerpts and `embedding` vectors are truncated.
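For orientation, a minimal sketch of loading and inspecting a dump like this with the `datasets` library, assuming it is published as a Hugging Face dataset; the repo id `"<user>/<dataset>"` below is a placeholder, since the source does not name the dataset:

```python
# Minimal sketch, assuming the dump is available as a Hugging Face dataset;
# "<user>/<dataset>" is a placeholder repo id, not the real name.
from datasets import load_dataset

ds = load_dataset("<user>/<dataset>", split="train")
print(ds.features)            # column names and types, matching the schema above

row = ds[0]                   # one record, as a plain dict
print(row["modelId"], row["pipeline_tag"], int(row["downloads"]))
print(len(row["embedding"]))  # dimensionality of the README embedding vector
```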

modelId: hfl/chinese-macbert-base
sha: a986e004d2a7f2a1c2f5a3edef4e20604a974ed1
lastModified: 2021-05-19T19:09:45.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "zh", "arxiv:2004.13922", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: hfl
config: null
id: hfl/chinese-macbert-base
downloads: 36,823,840
likes: 43
library_name: transformers
readme, then embedding:
--- language: - zh tags: - bert license: "apache-2.0" --- <p align="center"> <br> <img src="https://github.com/ymcui/MacBERT/raw/master/pics/banner.png" width="500"/> <br> </p> <p align="center"> <a href="https://github.com/ymcui/MacBERT/blob/master/LICENSE"> <img alt="GitHub" src="https://img....
[ -0.12368089705705643, 0.0159906018525362, 0.07993485778570175, 0.010601235553622246, 0.06334946304559708, 0.02685576304793358, 0.021748080849647522, 0.007624071557074785, 0.0053676413372159, -0.0035076644271612167, 0.08696867525577545, -0.03400150686502457, 0.06885205209255219, 0.063679352...

modelId: microsoft/deberta-base
sha: 7d4c0126b06bd59dccd3e48e467ed11e37b77f3f
lastModified: 2022-01-13T13:56:18.000Z
tags:
[ "pytorch", "tf", "rust", "deberta", "en", "arxiv:2006.03654", "transformers", "deberta-v1", "license:mit" ]
pipeline_tag: null
private: false
author: microsoft
config: null
id: microsoft/deberta-base
downloads: 23,662,412
likes: 15
library_name: transformers
readme, then embedding:
--- language: en tags: deberta-v1 thumbnail: https://huggingface.co/front/thumbnails/microsoft.png license: mit --- ## DeBERTa: Decoding-enhanced BERT with Disentangled Attention [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and enhanced mask decoder. It...
[ -0.10310466587543488, -0.1119682714343071, 0.015463879331946373, 0.00776915205642581, 0.017533447593450546, -0.0014948627213016152, -0.015752648934721947, 0.038549475371837616, -0.017899053171277046, 0.048586562275886536, 0.026507191359996796, -0.007420028559863567, -0.02895142324268818, 0...

modelId: bert-base-uncased
sha: 418430c3b5df7ace92f2aede75700d22c78a0f95
lastModified: 2022-06-06T11:41:24.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-base-uncased
downloads: 22,268,934
likes: 204
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT base model (uncased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](http...
[ -0.10429023206233978, -0.07568823546171188, 0.0528205968439579, 0.029537051916122437, 0.03592930734157562, 0.06076014041900635, 0.016745345667004585, -0.011535749770700932, 0.014104398898780346, -0.024821121245622635, 0.0364287793636322, -0.055009908974170685, 0.05354085937142372, 0.041097...

modelId: gpt2
sha: 6c0e6080953db56375760c0471a8c5f2929baf11
lastModified: 2021-05-19T16:25:59.000Z
tags:
[ "pytorch", "tf", "jax", "tflite", "rust", "gpt2", "text-generation", "en", "transformers", "exbert", "license:mit" ]
pipeline_tag: text-generation
private: false
author: null
config: null
id: gpt2
downloads: 11,350,803
likes: 164
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: mit --- # GPT-2 Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better...
[ -0.05892602726817131, -0.08013296872377396, 0.060150083154439926, 0.0071877371519804, 0.07087277621030807, 0.028216511011123657, 0.01796095259487629, 0.037697840481996536, 0.03646615520119667, -0.03444734588265419, -0.016371622681617737, -0.00010747667693067342, 0.03819410130381584, 0.0576...

modelId: distilbert-base-uncased
sha: 043235d6088ecd3dd5fb5ca3592b6913fd516027
lastModified: 2022-05-31T19:08:36.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "distilbert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1910.01108", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: distilbert-base-uncased
downloads: 11,250,037
likes: 70
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia --- # DistilBERT base model (uncased) This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-uncased). It was introduced in [this paper](https://arxiv.org/abs/1910.01108). The code for the disti...
[ -0.13866068422794342, -0.06151656433939934, 0.08515582978725433, 0.01706661842763424, 0.014973160810768604, -0.052277322858572006, -0.007684790063649416, 0.061743155121803284, 0.00879606045782566, -0.05457921698689461, 0.02629678323864937, 0.030604751780629158, 0.04207247868180275, 0.00054...

modelId: Jean-Baptiste/camembert-ner
sha: dbec8489a1c44ecad9da8a9185115bccabd799fe
lastModified: 2022-04-04T01:13:33.000Z
tags:
[ "pytorch", "camembert", "token-classification", "fr", "dataset:Jean-Baptiste/wikiner_fr", "transformers", "autotrain_compatible" ]
pipeline_tag: token-classification
private: false
author: Jean-Baptiste
config: null
id: Jean-Baptiste/camembert-ner
downloads: 9,833,060
likes: 11
library_name: transformers
readme, then embedding:
--- language: fr datasets: - Jean-Baptiste/wikiner_fr widget: - text: "Je m'appelle jean-baptiste et je vis à montréal" - text: "george washington est allé à washington" --- # camembert-ner: model fine-tuned from camemBERT for NER task. ## Introduction [camembert-ner] is a NER model that was fine-tuned from camemBER...
[ -0.11113610863685608, -0.03646893799304962, 0.059876054525375366, -0.003222051775082946, 0.012641402892768383, -0.04548738896846771, -0.01905476488173008, 0.06513305753469467, 0.05475887656211853, -0.036326371133327484, 0.01774558238685131, -0.053867973387241364, 0.0843958929181099, 0.0142...

modelId: bert-base-cased
sha: a8d257ba9925ef39f3036bfc338acf5283c512d9
lastModified: 2021-09-06T08:07:18.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-base-cased
downloads: 7,598,326
likes: 30
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT base model (cased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https:...
[ -0.089531309902668, -0.07718607783317566, 0.05249395594000816, 0.018646515905857086, 0.0447709858417511, 0.06499486416578293, 0.01449884008616209, 0.013737021014094353, 0.022442683577537537, -0.017234643921256065, 0.04510723799467087, -0.0327119417488575, 0.06715553998947144, 0.04273235425...

modelId: roberta-base
sha: 251c3c36356d3ad6845eb0554fdb9703d632c6cc
lastModified: 2021-07-06T10:34:50.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "roberta", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1907.11692", "arxiv:1806.02847", "transformers", "exbert", "license:mit", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: roberta-base
downloads: 7,254,067
likes: 45
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: mit datasets: - bookcorpus - wikipedia --- # RoBERTa base model Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1907.11692) and first released in [this repository](https://github.com...
[ -0.08212313055992126, -0.10390054434537888, -0.027432728558778763, 0.05361437052488327, -0.008713857270777225, 0.08931197971105576, 0.012921089306473732, -0.0319417305290699, 0.03874579817056656, 0.02321513742208481, 0.06043674796819687, -0.031464289873838425, 0.07827506959438324, 0.029055...

modelId: SpanBERT/spanbert-large-cased
sha: a49cba45de9565a5d3e7b089a94dbae679e64e79
lastModified: 2021-05-19T11:31:33.000Z
tags:
[ "pytorch", "jax", "bert", "transformers" ]
pipeline_tag: null
private: false
author: SpanBERT
config: null
id: SpanBERT/spanbert-large-cased
downloads: 7,120,559
likes: 3
library_name: transformers
readme, then embedding:
Entry not found
[ 0.0461147278547287, -0.038838207721710205, -0.01049656979739666, -0.03682169318199158, 0.011261860840022564, 0.013094935566186905, 0.0019101888174191117, -0.013979103416204453, 0.027092741802334785, -0.015212527476251125, 0.017284274101257324, -0.08189476281404495, 0.03817418962717056, -0....

modelId: xlm-roberta-base
sha: f6d161e8f5f6f2ed433fb4023d6cb34146506b3f
lastModified: 2022-06-06T11:40:43.000Z
tags:
[ "pytorch", "tf", "jax", "xlm-roberta", "fill-mask", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha"...
pipeline_tag: fill-mask
private: false
author: null
config: null
id: xlm-roberta-base
downloads: 6,960,013
likes: 42
library_name: transformers
readme, then embedding:
--- tags: - exbert language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn ...
[ -0.08281730860471725, 0.003249414497986436, -0.041813094168901443, -0.047962404787540436, 0.060015056282281876, 0.03705935552716255, 0.03477831184864044, 0.03974342346191406, -0.008022570051252842, 0.021637847647070885, 0.08089376986026764, -0.04877686873078346, 0.08540689200162888, -0.035...

modelId: distilbert-base-uncased-finetuned-sst-2-english
sha: 00c3f1ef306e837efb641eaca05d24d161d9513c
lastModified: 2022-07-22T08:00:55.000Z
tags:
[ "pytorch", "tf", "rust", "distilbert", "text-classification", "en", "dataset:sst2", "dataset:glue", "transformers", "license:apache-2.0", "model-index" ]
pipeline_tag: text-classification
private: false
author: null
config: null
id: distilbert-base-uncased-finetuned-sst-2-english
downloads: 5,401,984
likes: 77
library_name: transformers
readme, then embedding:
--- language: en license: apache-2.0 datasets: - sst2 - glue model-index: - name: distilbert-base-uncased-finetuned-sst-2-english results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: sst2 split: validation metrics: ...
[ -0.034528184682130814, -0.04061563313007355, -0.060734909027814865, 0.035735320299863815, 0.08161136507987976, 0.04066551476716995, 0.0003900852461811155, 0.05125672370195389, -0.01557959709316492, -0.04621459171175957, 0.06463723629713058, -0.0745718702673912, 0.007900773547589779, -0.046...

modelId: distilroberta-base
sha: c1149320821601524a8d373726ed95bbd2bc0dc2
lastModified: 2022-07-22T08:13:21.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "roberta", "fill-mask", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: distilroberta-base
downloads: 5,192,102
likes: 21
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: apache-2.0 datasets: - openwebtext --- # Model Card for DistilRoBERTa base # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation](#evaluat...
[ -0.1325169950723648, -0.03109726682305336, 0.035468652844429016, 0.0266091488301754, 0.027898555621504784, -0.04462097957730293, -0.04662588611245155, 0.06629492342472076, -0.08276285231113434, -0.04405451565980911, 0.023149758577346802, -0.024889251217246056, 0.013186859898269176, -0.0024...

modelId: distilgpt2
sha: ca98be8f8f0994e707b944a9ef55e66fbcf9e586
lastModified: 2022-07-22T08:12:56.000Z
tags:
[ "pytorch", "tf", "jax", "tflite", "rust", "gpt2", "text-generation", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:2201.08542", "arxiv:2203.12574", "arxiv:1910.09700", "arxiv:1503.02531", "transformers", "exbert", "license:apache-2.0", "model-index", "co2_eq_emissions" ...
pipeline_tag: text-generation
private: false
author: null
config: null
id: distilgpt2
downloads: 4,525,173
likes: 77
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: apache-2.0 datasets: - openwebtext model-index: - name: distilgpt2 results: - task: type: text-generation name: Text Generation dataset: type: wikitext name: WikiText-103 metrics: - type: perplexity name: Perplexity ...
[ -0.12711955606937408, -0.02593734860420227, 0.022106902673840523, 0.04674231633543968, 0.011120681650936604, -0.0465211421251297, 0.0013098361669108272, 0.08374807238578796, -0.028656257316470146, -0.08449400216341019, 0.015780135989189148, -0.07485173642635345, -0.02609829604625702, 0.017...

modelId: cross-encoder/ms-marco-MiniLM-L-12-v2
sha: 97f7dcbdd6ab58fe7f44368c795fc5200b48fcbe
lastModified: 2021-08-05T08:39:01.000Z
tags:
[ "pytorch", "jax", "bert", "text-classification", "transformers", "license:apache-2.0" ]
pipeline_tag: text-classification
private: false
author: cross-encoder
config: null
id: cross-encoder/ms-marco-MiniLM-L-12-v2
downloads: 3,951,063
likes: 10
library_name: transformers
readme, then embedding:
--- license: apache-2.0 --- # Cross-Encoder for MS Marco This model was trained on the [MS Marco Passage Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking) task. The model can be used for Information Retrieval: Given a query, encode the query will all possible passages (e.g. retrieved with ElasticSearch)....
[ -0.06551434844732285, -0.07030782848596573, -0.004193244501948357, 0.05925549939274788, -0.008339117281138897, 0.08594850450754166, -0.029806630685925484, 0.0668809562921524, -0.0017081426922231913, -0.053372517228126526, -0.029908085241913795, 0.04448296129703522, 0.032882269471883774, 0....

modelId: albert-base-v2
sha: 51dbd9db43a0c6eba97f74b91ce26fface509e0b
lastModified: 2021-08-30T12:04:48.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: albert-base-v2
downloads: 3,862,051
likes: 15
library_name: transformers
readme, then embedding:
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Base v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-rese...
[ 0.0007209334871731699, -0.040847085416316986, -0.0007834088755771518, 0.05562419071793556, 0.026085415855050087, 0.06654420495033264, -0.03512277826666832, -0.035424113273620605, 0.04950462654232979, -0.05898051708936691, 0.05823838338255882, -0.05316125601530075, 0.054170671850442886, -0....

modelId: bert-base-chinese
sha: 38fda776740d17609554e879e3ac7b9837bdb5ee
lastModified: 2022-07-22T08:09:06.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "zh", "transformers", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-base-chinese
downloads: 3,660,463
likes: 107
library_name: transformers
readme, then embedding:
--- language: zh --- # Bert-base-chinese ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [How to Get Started With the Model](#how-to-get-started-with-the-model) # Model Detai...
[ -0.1257508248090744, -0.012285888195037842, 0.0526764802634716, 0.03729807585477829, 0.047670330852270126, 0.0854436531662941, 0.026564601808786392, -0.009043475612998009, 0.01670899987220764, -0.019426235929131508, 0.03087518736720085, -0.07265433669090271, 0.087125264108181, 0.0327844023...

modelId: bert-base-multilingual-cased
sha: aff660c4522e466f4d0de19eaf94f91e4e2e7375
lastModified: 2021-05-18T16:18:16.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "multilingual", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-base-multilingual-cased
downloads: 3,089,919
likes: 40
library_name: transformers
readme, then embedding:
--- language: multilingual license: apache-2.0 datasets: - wikipedia --- # BERT multilingual base model (cased) Pretrained model on the top 104 languages with the largest Wikipedia using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released ...
[ -0.0880918800830841, -0.08954025059938431, 0.05024603381752968, 0.02027062512934208, 0.02424587681889534, 0.05601003021001816, -0.009814261458814144, -0.006841807160526514, 0.03744438290596008, -0.031194061040878296, 0.022091440856456757, -0.05183573439717293, 0.0506647489964962, 0.0512378...

modelId: xlm-roberta-large-finetuned-conll03-english
sha: 33a83d9855a119c0453ce450858c07835a0bdbed
lastModified: 2022-07-22T08:04:08.000Z
tags:
[ "pytorch", "rust", "xlm-roberta", "token-classification", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", ...
pipeline_tag: token-classification
private: false
author: null
config: null
id: xlm-roberta-large-finetuned-conll03-english
downloads: 2,851,282
likes: 23
library_name: transformers
readme, then embedding:
--- language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn - mr - ms - my ...
[ -0.10246257483959198, 0.04425393044948578, -0.012316679581999779, -0.025751229375600815, 0.0764460638165474, 0.03561047092080116, 0.0360226072371006, 0.010814625769853592, 0.045203957706689835, 0.0420810803771019, 0.07108546048402786, -0.02943423204123974, 0.04923563078045845, -0.026391703...

modelId: tals/albert-xlarge-vitaminc-mnli
sha: 4c79eb5353f6104eb148d9221560c913f45677c7
lastModified: 2022-06-24T01:33:47.000Z
tags:
[ "pytorch", "tf", "albert", "text-classification", "python", "dataset:fever", "dataset:glue", "dataset:multi_nli", "dataset:tals/vitaminc", "transformers" ]
pipeline_tag: text-classification
private: false
author: tals
config: null
id: tals/albert-xlarge-vitaminc-mnli
downloads: 2,529,752
likes: null
library_name: transformers
readme, then embedding:
--- language: python datasets: - fever - glue - multi_nli - tals/vitaminc --- # Details Model used in [Get Your Vitamin C! Robust Fact Verification with Contrastive Evidence](https://aclanthology.org/2021.naacl-main.52/) (Schuster et al., NAACL 21`). For more details see: https://github.com/TalSchuster/VitaminC When ...
[ -0.1226511225104332, -0.010432502254843712, -0.0084984814748168, 0.02337195724248886, 0.06423482298851013, -0.00009322218102170154, -0.00042406650027260184, 0.10444913804531097, 0.013196569867432117, -0.002656024880707264, 0.027341550216078758, -0.052648305892944336, 0.052716560661792755, ...

modelId: bert-large-uncased
sha: 3835a195d41f7ddc47d5ecab84b64f71d6f144e9
lastModified: 2021-05-18T16:40:29.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-large-uncased
downloads: 2,362,221
likes: 9
library_name: transformers
readme, then embedding:
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (uncased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com...
[ -0.089927539229393, -0.0816199854016304, 0.0626024678349495, 0.043510835617780685, 0.032939936965703964, 0.05772925913333893, 0.012369436211884022, -0.022393228486180305, 0.034775275737047195, -0.026343373581767082, 0.03744859620928764, -0.044184569269418716, 0.05464019253849983, 0.0297626...

modelId: valhalla/t5-small-qa-qg-hl
sha: a9d81e686f2169360fd59d8329235d3c4ba74f4f
lastModified: 2021-06-23T14:42:41.000Z
tags:
[ "pytorch", "jax", "t5", "text2text-generation", "dataset:squad", "arxiv:1910.10683", "transformers", "question-generation", "license:mit", "autotrain_compatible" ]
pipeline_tag: text2text-generation
private: false
author: valhalla
config: null
id: valhalla/t5-small-qa-qg-hl
downloads: 2,171,047
likes: 5
library_name: transformers
readme, then embedding:
--- datasets: - squad tags: - question-generation widget: - text: "generate question: <hl> 42 <hl> is the answer to life, the universe and everything. </s>" - text: "question: What is 42 context: 42 is the answer to life, the universe and everything. </s>" license: mit --- ## T5 for multi-task QA and QG This is multi-...
[ -0.09922609478235245, 0.028051195666193962, -0.03504813835024834, 0.041627272963523865, 0.03275827690958977, 0.03436599299311638, 0.013137323781847954, 0.0001289481297135353, 0.05512295663356781, -0.008591915480792522, 0.014779935590922832, -0.13871879875659943, 0.0689271092414856, -0.0048...

modelId: google/t5-v1_1-xl
sha: a9e51c46bd6f3893213c51edf9498be6f0426797
lastModified: 2020-11-19T19:55:34.000Z
tags:
[ "pytorch", "tf", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2002.05202", "arxiv:1910.10683", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: text2text-generation
private: false
author: google
config: null
id: google/t5-v1_1-xl
downloads: 1,980,571
likes: 3
library_name: transformers
readme, then embedding:
--- language: en datasets: - c4 license: apache-2.0 --- [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) Version 1.1 ## Version 1.1 [T5 Version 1.1](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511) includes the f...
[ -0.08697672933340073, -0.03702358901500702, 0.06502795964479446, 0.006211201194673777, 0.059899959713220596, 0.04369475692510605, -0.025000816211104393, -0.05999826639890671, -0.010968467220664024, -0.09833592176437378, 0.03334726393222809, 0.0184017401188612, -0.024979878216981888, -0.026...

modelId: sentence-transformers/all-MiniLM-L6-v2
sha: 717413c64de70e37b55cf53c9cdff0e2d331fac3
lastModified: 2022-07-11T21:08:45.000Z
tags:
[ "pytorch", "tf", "bert", "feature-extraction", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:MS Marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:code_search_net", "dataset:search_qa", "dataset:eli5", "dataset:snli", "dataset:multi_...
pipeline_tag: sentence-similarity
private: false
author: sentence-transformers
config: null
id: sentence-transformers/all-MiniLM-L6-v2
downloads: 1,933,749
likes: 60
library_name: sentence-transformers
readme, then embedding:
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity language: en license: apache-2.0 datasets: - s2orc - flax-sentence-embeddings/stackexchange_xml - MS Marco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - nat...
[ -0.08548831194639206, -0.06260090321302414, -0.011101335287094116, -0.022259490564465523, 0.056814294308423996, 0.07366131246089935, -0.029393881559371948, 0.02135009691119194, 0.01766866445541382, -0.0356454998254776, 0.05661066994071007, -0.019041549414396286, 0.0534217394888401, 0.04904...

modelId: sentence-transformers/paraphrase-MiniLM-L6-v2
sha: 68b97aaedb0c72be3c88c1af64296b3bbb8001fa
lastModified: 2022-06-15T18:39:43.000Z
tags:
[ "pytorch", "tf", "bert", "feature-extraction", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
pipeline_tag: sentence-similarity
private: false
author: sentence-transformers
config: null
id: sentence-transformers/paraphrase-MiniLM-L6-v2
downloads: 1,710,481
likes: 16
library_name: sentence-transformers
readme, then embedding:
--- pipeline_tag: sentence-similarity license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/paraphrase-MiniLM-L6-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dens...
[ -0.028167856857180595, -0.05239181965589523, -0.009377066977322102, 0.040452685207128525, 0.03128011152148247, 0.06838051974773407, -0.054357923567295074, 0.03204594552516937, 0.006394308991730213, -0.07616591453552246, 0.06814518570899963, -0.0010415782453492284, 0.05818144232034683, 0.04...

modelId: t5-small
sha: d78aea13fa7ecd06c29e3e46195d6341255065d5
lastModified: 2022-07-22T08:11:14.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "t5", "text2text-generation", "en", "fr", "ro", "de", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "transformers", ...
pipeline_tag: translation
private: false
author: null
config: null
id: t5-small
downloads: 1,707,833
likes: 20
library_name: transformers
readme, then embedding:
--- language: - en - fr - ro - de datasets: - c4 tags: - summarization - translation license: apache-2.0 --- # Model Card for T5 Small ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876...
[ -0.05660082772374153, -0.000966046005487442, -0.04522397369146347, 0.05273234471678734, 0.13198721408843994, 0.019032752141356468, -0.004690529778599739, 0.07759352773427963, 0.026721565052866936, -0.04503154382109642, 0.032610055059194565, -0.04494328796863556, 0.024699600413441658, 0.027...

modelId: facebook/bart-large-mnli
sha: c626438eeca63a93bd6024b0a0fbf8b3c0c30d7b
lastModified: 2021-08-09T08:25:07.000Z
tags:
[ "pytorch", "jax", "rust", "bart", "text-classification", "dataset:multi_nli", "arxiv:1910.13461", "arxiv:1909.00161", "transformers", "license:mit", "zero-shot-classification" ]
pipeline_tag: zero-shot-classification
private: false
author: facebook
config: null
id: facebook/bart-large-mnli
downloads: 1,668,146
likes: 147
library_name: transformers
readme, then embedding:
--- license: mit thumbnail: https://huggingface.co/front/thumbnails/facebook.png pipeline_tag: zero-shot-classification datasets: - multi_nli --- # bart-large-mnli This is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/da...
[ -0.07644176483154297, -0.043402399867773056, 0.05649516358971596, -0.028029777109622955, 0.06512340158224106, 0.0230893362313509, -0.0759434774518013, -0.030554726719856262, -0.020933518186211586, -0.053710583597421646, 0.046663254499435425, -0.05096018686890602, -0.015758709982037544, -0....

modelId: cardiffnlp/twitter-xlm-roberta-base-sentiment
sha: f3e34b6c30bf27b6649f72eca85d0bbe79df1e55
lastModified: 2022-06-22T19:15:32.000Z
tags:
[ "pytorch", "tf", "xlm-roberta", "text-classification", "multilingual", "arxiv:2104.12250", "transformers" ]
pipeline_tag: text-classification
private: false
author: cardiffnlp
config: null
id: cardiffnlp/twitter-xlm-roberta-base-sentiment
downloads: 1,479,744
likes: 25
library_name: transformers
readme, then embedding:
--- language: multilingual widget: - text: "🤗" - text: "T'estimo! ❤️" - text: "I love you!" - text: "I hate you 🤮" - text: "Mahal kita!" - text: "사랑해!" - text: "난 너가 싫어" - text: "😍😍😍" --- # twitter-XLM-roBERTa-base for Sentiment Analysis This is a multilingual XLM-roBERTa-base model trained on ~198M tweets and ...
[ -0.10031694173812866, -0.02794644981622696, -0.014125077985227108, -0.00975096970796585, -0.004203904885798693, 0.0024737271014600992, 0.008777450770139694, 0.024569865316152573, 0.06987903267145157, -0.003512149676680565, 0.03865955024957657, -0.035966381430625916, 0.0726815015077591, -0....

modelId: roberta-large
sha: 619fd8c2ca2bc7ac3959b7f71b6c426c897ba407
lastModified: 2021-05-21T08:57:02.000Z
tags:
[ "pytorch", "tf", "jax", "roberta", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1907.11692", "arxiv:1806.02847", "transformers", "exbert", "license:mit", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: roberta-large
downloads: 1,479,252
likes: 39
library_name: transformers
readme, then embedding:
--- language: en tags: - exbert license: mit datasets: - bookcorpus - wikipedia --- # RoBERTa large model Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1907.11692) and first released in [this repository](htt...
[ -0.07912211120128632, -0.10217892378568649, -0.02644594945013523, 0.0550602488219738, -0.00734882615506649, 0.08770351111888885, 0.00936263706535101, -0.03351088985800743, 0.03828578069806099, 0.022800803184509277, 0.06117379665374756, -0.03063245862722397, 0.07556978613138199, 0.027471099...

modelId: DeepPavlov/rubert-base-cased-conversational
sha: 645946ce91842a52eaacb2705c77e59194145ffa
lastModified: 2021-11-08T13:06:54.000Z
tags:
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "transformers" ]
pipeline_tag: feature-extraction
private: false
author: DeepPavlov
config: null
id: DeepPavlov/rubert-base-cased-conversational
downloads: 1,418,924
likes: 5
library_name: transformers
readme, then embedding:
--- language: - ru --- # rubert-base-cased-conversational Conversational RuBERT \(Russian, cased, 12‑layer, 768‑hidden, 12‑heads, 180M parameters\) was trained on OpenSubtitles\[1\], [Dirty](https://d3.ru/), [Pikabu](https://pikabu.ru/), and a Social Media segment of Taiga corpus\[2\]. We assembled a new vocabulary f...
[ -0.07024551928043365, -0.13161787390708923, 0.024362895637750626, -0.043754514306783676, 0.08567774295806885, 0.07290639728307724, 0.0071761030703783035, -0.009646309539675713, 0.03384178876876831, -0.034751225262880325, 0.03113185241818428, -0.022719744592905045, 0.01884695142507553, 0.04...

modelId: microsoft/codebert-base
sha: 3b0952feddeffad0063f274080e3c23d75e7eb39
lastModified: 2022-02-11T19:59:44.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "roberta", "feature-extraction", "arxiv:2002.08155", "transformers" ]
pipeline_tag: feature-extraction
private: false
author: microsoft
config: null
id: microsoft/codebert-base
downloads: 1,347,269
likes: 30
library_name: transformers
readme, then embedding:
## CodeBERT-base Pretrained weights for [CodeBERT: A Pre-Trained Model for Programming and Natural Languages](https://arxiv.org/abs/2002.08155). ### Training Data The model is trained on bi-modal data (documents & code) of [CodeSearchNet](https://github.com/github/CodeSearchNet) ### Training Objective This model is i...
[ -0.1369808167219162, -0.10500998049974442, -0.036125168204307556, 0.05996467173099518, -0.01003129780292511, 0.08013205230236053, -0.01008650567382574, -0.008504729717969894, -0.051955025643110275, -0.0021893756929785013, 0.06277790665626526, 0.015850992873311043, 0.03458425775170326, -0.0...

modelId: ProsusAI/finbert
sha: 5ea63b3d0c737ad6f06e061d9af36b1f7bbd1a4b
lastModified: 2022-06-03T06:34:37.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "text-classification", "en", "arxiv:1908.10063", "transformers", "financial-sentiment-analysis", "sentiment-analysis" ]
pipeline_tag: text-classification
private: false
author: ProsusAI
config: null
id: ProsusAI/finbert
downloads: 1,254,493
likes: 81
library_name: transformers
readme, then embedding:
--- language: "en" tags: - financial-sentiment-analysis - sentiment-analysis widget: - text: "Stocks rallied and the British pound gained." --- FinBERT is a pre-trained NLP model to analyze sentiment of financial text. It is built by further training the BERT language model in the finance domain, using a large financi...
[ -0.024671152234077454, -0.06598435342311859, -0.07246746122837067, 0.007539877202361822, 0.03852372616529465, 0.034074846655130386, 0.06207428500056267, 0.0459100604057312, 0.0375867560505867, -0.06407948583364487, -0.010915640741586685, 0.0014108888572081923, -0.019794268533587456, 0.0332...

modelId: t5-base
sha: 23aa4f41cb7c08d4b05c8f327b22bfa0eb8c7ad9
lastModified: 2022-07-22T08:10:56.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "t5", "text2text-generation", "en", "fr", "ro", "de", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "transformers", ...
pipeline_tag: translation
private: false
author: null
config: null
id: t5-base
downloads: 1,234,008
likes: 53
library_name: transformers
readme, then embedding:
--- language: - en - fr - ro - de datasets: - c4 tags: - summarization - translation license: apache-2.0 --- # Model Card for T5 Base ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e516638767...
[ -0.06001785770058632, -0.0032114647328853607, -0.043597523123025894, 0.04925158619880676, 0.13157330453395844, 0.017546458169817924, -0.0037867906503379345, 0.07551992684602737, 0.02609967440366745, -0.04440973699092865, 0.03008037619292736, -0.046874769032001495, 0.024552587419748306, 0.0...

modelId: deepset/roberta-base-squad2
sha: d3c3bb6f2aaec6bf057fbf3796af9c5b9b939758
lastModified: 2022-07-22T11:42:08.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "roberta", "question-answering", "en", "dataset:squad_v2", "transformers", "license:cc-by-4.0", "model-index", "autotrain_compatible" ]
pipeline_tag: question-answering
private: false
author: deepset
config: null
id: deepset/roberta-base-squad2
downloads: 1,111,876
likes: 92
library_name: transformers
readme, then embedding:
--- language: en datasets: - squad_v2 license: cc-by-4.0 model-index: - name: deepset/roberta-base-squad2 results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squad_v2 split: validation metrics: - name: Exac...
[ -0.11695931106805801, -0.0684281513094902, -0.02039109356701374, 0.052642595022916794, 0.036685723811388016, 0.05338458716869354, 0.014029116369783878, 0.021648414433002472, -0.009836599230766296, -0.04259449988603592, -0.016081688925623894, -0.11886314302682877, -0.0033667238894850016, 0....

modelId: distilbert-base-cased-distilled-squad
sha: 1b9d42b637aed70c9f3cd27e13b66ee9f847ed03
lastModified: 2022-07-22T07:57:01.000Z
tags:
[ "pytorch", "tf", "rust", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: question-answering
private: false
author: null
config: null
id: distilbert-base-cased-distilled-squad
downloads: 1,064,466
likes: 15
library_name: transformers
readme, then embedding:
--- language: en datasets: - squad metrics: - squad license: apache-2.0 --- # DistilBERT base cased distilled SQuAD ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-...
[ -0.14533667266368866, -0.026285884901881218, 0.008701134473085403, 0.04142187163233757, 0.058554768562316895, -0.013294125907123089, -0.04081675037741661, 0.17894130945205688, -0.03330094739794731, -0.043892886489629745, -0.0002285213122377172, 0.07188321650028229, 0.00904697272926569, 0.0...

modelId: xlm-roberta-large
sha: b2a6150f8be56457baf80c74342cc424080260f0
lastModified: 2022-06-27T11:25:40.000Z
tags:
[ "pytorch", "tf", "jax", "xlm-roberta", "fill-mask", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha"...
pipeline_tag: fill-mask
private: false
author: null
config: null
id: xlm-roberta-large
downloads: 1,017,218
likes: 24
library_name: transformers
readme, then embedding:
--- tags: - exbert language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn -...
[ -0.08172927796840668, 0.005226295441389084, -0.04117339476943016, -0.04801236838102341, 0.05737524852156639, 0.031084084883332253, 0.03243193402886391, 0.03622283786535263, -0.0067443703301250935, 0.019056109711527824, 0.08277745544910431, -0.043742552399635315, 0.08112572878599167, -0.036...

modelId: facebook/wav2vec2-base-960h
sha: 706111756296bc76512407a11e69526cf4e22aae
lastModified: 2022-06-30T00:05:41.000Z
tags:
[ "pytorch", "tf", "wav2vec2", "automatic-speech-recognition", "en", "dataset:librispeech_asr", "arxiv:2006.11477", "transformers", "audio", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
pipeline_tag: automatic-speech-recognition
private: false
author: facebook
config: null
id: facebook/wav2vec2-base-960h
downloads: 986,202
likes: 57
library_name: transformers
readme, then embedding:
--- language: en datasets: - librispeech_asr tags: - audio - automatic-speech-recognition - hf-asr-leaderboard license: apache-2.0 widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface....
[ -0.1060151606798172, -0.14686547219753265, -0.01560901664197445, -0.047346293926239014, 0.04207441955804825, 0.006387645378708839, 0.011943151243031025, -0.04540662840008736, -0.04757222533226013, -0.08122971653938293, 0.0399002879858017, -0.1235450878739357, -0.05642370507121086, -0.00941...

modelId: daigo/bert-base-japanese-sentiment
sha: 51ac2d2c0a5645d77ca26078fc5f02c349fbb93d
lastModified: 2021-05-19T14:36:34.000Z
tags:
[ "pytorch", "jax", "bert", "text-classification", "ja", "transformers" ]
pipeline_tag: text-classification
private: false
author: daigo
config: null
id: daigo/bert-base-japanese-sentiment
downloads: 972,842
likes: 7
library_name: transformers
readme, then embedding:
--- language: - ja --- binary classification # Usage ``` print(pipeline("sentiment-analysis",model="daigo/bert-base-japanese-sentiment",tokenizer="daigo/bert-base-japanese-sentiment")("私は幸福である。")) [{'label': 'ポジティブ', 'score': 0.98430425}] ```
[ -0.04349634051322937, 0.12685701251029968, -0.0252006184309721, 0.021490514278411865, -0.008598824962973595, 0.042314764112234116, 0.037573881447315216, 0.03974836319684982, 0.016339000314474106, -0.07068994641304016, 0.036761365830898285, -0.07984782010316849, -0.020948681980371475, 0.012...

modelId: bert-base-multilingual-uncased
sha: 99406b9f2cfa046409626308a01da45a2a078f62
lastModified: 2021-05-18T16:19:22.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-base-multilingual-uncased
downloads: 970,081
likes: 13
library_name: transformers
readme, then embedding:
--- language: en license: apache-2.0 datasets: - wikipedia --- # BERT multilingual base model (uncased) Pretrained model on the top 102 languages with the largest Wikipedia using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this...
[ -0.08508127182722092, -0.07772396504878998, 0.052124522626399994, 0.023885423317551613, 0.02731981873512268, 0.05022984370589256, -0.014638576656579971, -0.01630280539393425, 0.01909637451171875, -0.032029204070568085, 0.0286273006349802, -0.05059870332479477, 0.05735894665122032, 0.047481...

modelId: sentence-transformers/all-mpnet-base-v2
sha: bd44305fd6a1b43c16baf96765e2ecb20bca8e1d
lastModified: 2022-07-11T21:01:04.000Z
tags:
[ "pytorch", "mpnet", "fill-mask", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:MS Marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:code_search_net", "dataset:search_qa", "dataset:eli5", "dataset:snli", "dataset:multi_nli", "dataset...
pipeline_tag: sentence-similarity
private: false
author: sentence-transformers
config: null
id: sentence-transformers/all-mpnet-base-v2
downloads: 966,231
likes: 43
library_name: sentence-transformers
readme, then embedding:
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity language: en license: apache-2.0 datasets: - s2orc - flax-sentence-embeddings/stackexchange_xml - MS Marco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - nat...
[ -0.0859527662396431, -0.06226963922381401, -0.018229903653264046, -0.016796143725514412, 0.060666605830192566, 0.07571472227573395, -0.02144172601401806, 0.015986090525984764, 0.014631819911301136, -0.030871329829096794, 0.04278617352247238, -0.022306712344288826, 0.05234656110405922, 0.05...

modelId: sentence-transformers/all-MiniLM-L12-v2
sha: 9e16800aed25dbd1a96dfa6949c68c4d81d5dded
lastModified: 2022-07-11T21:05:39.000Z
tags:
[ "pytorch", "rust", "bert", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:MS Marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:code_search_net", "dataset:search_qa", "dataset:eli5", "dataset:snli", "dataset:multi_nli", "dataset:wikih...
pipeline_tag: sentence-similarity
private: false
author: sentence-transformers
config: null
id: sentence-transformers/all-MiniLM-L12-v2
downloads: 954,345
likes: 5
library_name: sentence-transformers
readme, then embedding:
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity language: en license: apache-2.0 datasets: - s2orc - flax-sentence-embeddings/stackexchange_xml - MS Marco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - nat...
[ -0.08374010771512985, -0.06014135852456093, -0.010806024074554443, -0.02427024208009243, 0.05708928406238556, 0.07425114512443542, -0.028894850984215736, 0.021960189566016197, 0.016340626403689384, -0.03585219755768776, 0.05689363181591034, -0.01657729037106037, 0.053780630230903625, 0.050...

modelId: EleutherAI/gpt-j-6B
sha: 918ad376364058dee23512629bc385380c98e57d
lastModified: 2022-03-15T13:34:01.000Z
tags:
[ "pytorch", "tf", "jax", "gptj", "text-generation", "en", "dataset:The Pile", "arxiv:2104.09864", "arxiv:2101.00027", "transformers", "causal-lm", "license:apache-2.0" ]
pipeline_tag: text-generation
private: false
author: EleutherAI
config: null
id: EleutherAI/gpt-j-6B
downloads: 945,885
likes: 243
library_name: transformers
readme, then embedding:
--- language: - en tags: - pytorch - causal-lm license: apache-2.0 datasets: - The Pile --- # GPT-J 6B ## Model Description GPT-J 6B is a transformer model trained using Ben Wang's [Mesh Transformer JAX](https://github.com/kingoflolz/mesh-transformer-jax/). "GPT-J" refers to the class of model, while "6B" represent...
[ -0.12672092020511627, 0.005716377403587103, -0.010990752838551998, 0.010420054197311401, 0.029339727014303207, -0.019964296370744705, -0.06083495914936066, 0.02669130079448223, -0.05095050483942032, -0.07086796313524246, 0.023609910160303116, -0.03130849823355675, 0.02748296409845352, -0.0...

modelId: prithivida/parrot_paraphraser_on_T5
sha: 9f32aa1e456e8e8a90d97e8673365f3090fa49fa
lastModified: 2021-05-18T07:53:27.000Z
tags:
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
pipeline_tag: text2text-generation
private: false
author: prithivida
config: null
id: prithivida/parrot_paraphraser_on_T5
downloads: 870,393
likes: 20
library_name: transformers
readme, then embedding:
# Parrot ## 1. What is Parrot? Parrot is a paraphrase based utterance augmentation framework purpose built to accelerate training NLU models. A paraphrase framework is more than just a paraphrasing model. For more details on the library and usage please refer to the [github page](https://github.com/PrithivirajDamodar...
[ -0.03259994462132454, -0.0861857458949089, 0.0016837017610669136, 0.0013438478345051408, 0.013536225073039532, 0.042183469980955124, 0.035633981227874756, -0.051882416009902954, 0.023064566776156425, -0.07208341360092163, 0.0177255030721426, -0.05516481027007103, -0.03346610814332962, 0.02...

modelId: openai/clip-vit-base-patch32
sha: f4881ba48ee4d21b7ed5602603b9e3e92eb1b346
lastModified: 2022-03-14T17:58:13.000Z
tags:
[ "pytorch", "tf", "jax", "clip", "feature-extraction", "arxiv:2103.00020", "arxiv:1908.04913", "transformers", "vision" ]
pipeline_tag: feature-extraction
private: false
author: openai
config: null
id: openai/clip-vit-base-patch32
downloads: 854,364
likes: 49
library_name: transformers
readme, then embedding:
--- tags: - vision --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robu...
[ -0.05266270786523819, -0.029511218890547752, -0.01378261111676693, -0.016498874872922897, 0.0789661630988121, -0.019893504679203033, -0.024232693016529083, 0.048260241746902466, 0.0682869404554367, -0.09571637958288193, 0.027020670473575592, -0.017727835103869438, -0.00020476577628869563, ...

modelId: prajjwal1/bert-tiny
sha: 6f75de8b60a9f8a2fdf7b69cbd86d9e64bcb3837
lastModified: 2021-10-27T18:29:01.000Z
tags:
[ "pytorch", "en", "arxiv:1908.08962", "arxiv:2110.01518", "transformers", "BERT", "MNLI", "NLI", "transformer", "pre-training", "license:mit" ]
pipeline_tag: null
private: false
author: prajjwal1
config: null
id: prajjwal1/bert-tiny
downloads: 799,875
likes: 9
library_name: transformers
readme, then embedding:
--- language: - en license: - mit tags: - BERT - MNLI - NLI - transformer - pre-training --- The following model is a Pytorch pre-trained model obtained from converting Tensorflow checkpoint found in the [official Google BERT repository](https://github.com/google-research/bert). This is one of the smaller ...
[ -0.10233648121356964, -0.04423704370856285, 0.06137974560260773, 0.04279656708240509, 0.01968778856098652, 0.06089229881763458, -0.011356689967215061, 0.07008757442235947, -0.03280850872397423, -0.00013833395496476442, 0.04287395998835564, 0.06444608420133591, -0.04259755089879036, 0.08806...

modelId: Jean-Baptiste/camembert-ner-with-dates
sha: 8c2d77a331733d26e0ca95a8f525e0ca3aa8e909
lastModified: 2021-08-30T12:55:48.000Z
tags:
[ "pytorch", "camembert", "token-classification", "fr", "dataset:Jean-Baptiste/wikiner_fr", "transformers", "autotrain_compatible" ]
pipeline_tag: token-classification
private: false
author: Jean-Baptiste
config: null
id: Jean-Baptiste/camembert-ner-with-dates
downloads: 782,295
likes: 8
library_name: transformers
readme, then embedding:
--- language: fr datasets: - Jean-Baptiste/wikiner_fr widget: - text: "Je m'appelle jean-baptiste et j'habite à montréal depuis fevr 2012" --- # camembert-ner: model fine-tuned from camemBERT for NER task (including DATE tag). ## Introduction [camembert-ner-with-dates] is an extension of french camembert-ner model w...
[ -0.037811506539583206, 0.0039187888614833355, 0.029471388086676598, 0.06324126571416855, 0.05223608762025833, -0.02278575301170349, -0.06069985404610634, 0.09245885163545609, 0.04154825583100319, -0.05503368377685547, -0.010640758089721203, -0.10378167033195496, -0.0010783277684822679, 0.0...

modelId: bert-large-cased
sha: d9238236d8326ce4bc117132bb3b7e62e95f3a9a
lastModified: 2021-05-18T16:33:16.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: bert-large-cased
downloads: 778,414
likes: 3
library_name: transformers
readme, then embedding:
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (cased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/g...
[ -0.09336259216070175, -0.0772688239812851, 0.06526713073253632, 0.03688047081232071, 0.03932911157608032, 0.06279470771551132, 0.015506204217672348, -0.017896251752972603, 0.04141203686594963, -0.015988845378160477, 0.04849627986550331, -0.04040355235338211, 0.05780552327632904, 0.03051182...

modelId: facebook/bart-large-cnn
sha: 9137060abd52495839d8c5c67ab4e6d0c49254b2
lastModified: 2022-07-28T15:16:55.000Z
tags:
[ "pytorch", "tf", "jax", "rust", "bart", "text2text-generation", "arxiv:1910.13461", "transformers", "summarization", "license:mit", "model-index", "autotrain_compatible" ]
pipeline_tag: summarization
private: false
author: facebook
config: null
id: facebook/bart-large-cnn
downloads: 766,202
likes: 72
library_name: transformers
readme, then embedding:
--- tags: - summarization license: mit thumbnail: https://huggingface.co/front/thumbnails/facebook.png model-index: - name: facebook/bart-large-cnn results: - task: type: summarization name: Summarization dataset: name: cnn_dailymail type: cnn_dailymail config: 3.0.0 split: t...
[ -0.05034415051341057, -0.046601008623838425, 0.07880154997110367, 0.04858936369419098, 0.07095653563737869, 0.01733444444835186, -0.0126886498183012, -0.04332607984542847, -0.008795729838311672, -0.06552112102508545, 0.020725594833493233, -0.07976234704256058, -0.014367977157235146, 0.0262...

modelId: unitary/toxic-bert
sha: 5cc53435803a6e6f1ac8e4b243910d3bf26803ff
lastModified: 2021-06-07T15:20:33.000Z
tags:
[ "pytorch", "jax", "bert", "text-classification", "arxiv:1703.04009", "arxiv:1905.12516", "transformers" ]
pipeline_tag: text-classification
private: false
author: unitary
config: null
id: unitary/toxic-bert
downloads: 749,909
likes: 15
library_name: transformers
readme, then embedding:
<div align="center"> **⚠️ Disclaimer:** The huggingface models currently give different results to the detoxify library (see issue [here](https://github.com/unitaryai/detoxify/issues/15)). For the most up to date models we recommend using the models from https://github.com/unitaryai/detoxify # 🙊 Detoxify...
[ -0.15935559570789337, -0.06404557824134827, 0.01823689602315426, 0.076371930539608, 0.12613750994205475, 0.009862762875854969, 0.012756885960698128, -0.02012854628264904, 0.018156876787543297, -0.035751380026340485, 0.00043700658716261387, -0.02509419247508049, 0.06608232110738754, 0.07615...

modelId: cardiffnlp/twitter-roberta-base-sentiment
sha: b636d90b2ed53d7ba6006cefd76f29cd354dd9da
lastModified: 2022-04-06T08:10:31.000Z
tags:
[ "pytorch", "tf", "jax", "roberta", "text-classification", "arxiv:2010.12421", "transformers" ]
pipeline_tag: text-classification
private: false
author: cardiffnlp
config: null
id: cardiffnlp/twitter-roberta-base-sentiment
downloads: 734,700
likes: 57
library_name: transformers
readme, then embedding:
# Twitter-roBERTa-base for Sentiment Analysis This is a roBERTa-base model trained on ~58M tweets and finetuned for sentiment analysis with the TweetEval benchmark. This model is suitable for English (for a similar multilingual model, see [XLM-T](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment)). ...
[ -0.08989861607551575, -0.08094963431358337, -0.017688533291220665, 0.004349798895418644, 0.02066958136856556, 0.08162334561347961, 0.014570934697985649, 0.06855554133653641, 0.052577290683984756, 0.017106739804148674, 0.022359319031238556, -0.014590694569051266, 0.046774785965681076, 0.053...

modelId: mrm8488/t5-base-finetuned-question-generation-ap
sha: 7281097a2e51b1b57684b7de9999e32a0250dd83
lastModified: 2022-06-06T21:28:57.000Z
tags:
[ "pytorch", "tf", "t5", "text2text-generation", "en", "dataset:squad", "arxiv:1910.10683", "transformers", "autotrain_compatible" ]
pipeline_tag: text2text-generation
private: false
author: mrm8488
config: null
id: mrm8488/t5-base-finetuned-question-generation-ap
downloads: 717,961
likes: 26
library_name: transformers
readme, then embedding:
--- language: en datasets: - squad widget: - text: "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google" --- # T5-base fine-tuned on SQuAD for **Question Generation** [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) fine-tune...
[ -0.1065717414021492, -0.023419689387083054, -0.0009038501302711666, 0.00982855074107647, 0.052154503762722015, 0.03478077054023743, 0.03118412010371685, -0.0010152772301808, 0.01159543078392744, -0.013769790530204773, 0.02345964126288891, -0.04115462675690651, 0.041412290185689926, -0.0218...

modelId: google/bert_uncased_L-2_H-128_A-2
sha: 1ae49ff827beda5996998802695c4cac8e9932c6
lastModified: 2021-05-19T17:28:12.000Z
tags:
[ "pytorch", "jax", "bert", "arxiv:1908.08962", "transformers", "license:apache-2.0" ]
pipeline_tag: null
private: false
author: google
config: null
id: google/bert_uncased_L-2_H-128_A-2
downloads: 687,625
likes: 11
library_name: transformers
readme, then embedding:
--- thumbnail: https://huggingface.co/front/thumbnails/google.png license: apache-2.0 --- BERT Miniatures === This is the set of 24 BERT models referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962) (English only, uncased, trained with Word...
[ -0.02777470275759697, -0.02693094126880169, 0.07438826560974121, 0.03228488564491272, -0.0023304771166294813, 0.018128493800759315, -0.06253628432750702, 0.0994548574090004, -0.014644814655184746, 0.018868697807192802, -0.015814494341611862, 0.03585591912269592, 0.03645862638950348, 0.0455...

modelId: dslim/bert-base-NER
sha: f7c2808a659015eeb8828f3f809a2f1be67a2446
lastModified: 2021-09-05T12:00:26.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "token-classification", "en", "dataset:conll2003", "arxiv:1810.04805", "transformers", "license:mit", "autotrain_compatible" ]
pipeline_tag: token-classification
private: false
author: dslim
config: null
id: dslim/bert-base-NER
downloads: 669,498
likes: 62
library_name: transformers
readme, then embedding:
--- language: en datasets: - conll2003 license: mit --- # bert-base-NER ## Model description **bert-base-NER** is a fine-tuned BERT model that is ready to use for **Named Entity Recognition** and achieves **state-of-the-art performance** for the NER task. It has been trained to recognize four types of entities: locat...
[ -0.08355141431093216, -0.06715936213731766, 0.06844159960746765, -0.050951920449733734, -0.027103595435619354, -0.008353169076144695, -0.0054949745535850525, 0.02595699019730091, 0.04241294786334038, -0.0159318670630455, -0.019640907645225525, -0.043519243597984314, -0.01804971694946289, 0...

modelId: uer/chinese_roberta_L-12_H-768
sha: b082602ba4eba86f785a6b4e3310eccc394816ee
lastModified: 2022-07-15T08:16:22.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "zh", "dataset:CLUECorpusSmall", "arxiv:1909.05658", "arxiv:1908.08962", "transformers", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: uer
config: null
id: uer/chinese_roberta_L-12_H-768
downloads: 649,235
likes: 2
library_name: transformers
readme, then embedding:
--- language: zh datasets: CLUECorpusSmall widget: - text: "北京是[MASK]国的首都。" --- # Chinese RoBERTa Miniatures ## Model description This is the set of 24 Chinese RoBERTa models pre-trained by [UER-py](https://github.com/dbiir/UER-py/), which is introduced in [this paper](https://arxiv.org/abs/1909.05658). [Turc e...
[ -0.07201097905635834, -0.024355629459023476, 0.05586782842874527, 0.023193588480353355, -0.03628445044159889, 0.0811743214726448, -0.027624739333987236, 0.04325660318136215, -0.03054499253630638, 0.02316826395690441, 0.06062658503651619, -0.028973720967769623, 0.043243613094091415, 0.04185...

modelId: cl-tohoku/bert-base-japanese-whole-word-masking
sha: ab68bf4a4d55e7772b1fbea6441bdab72aaf949c
lastModified: 2021-09-23T13:45:34.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ja", "dataset:wikipedia", "transformers", "license:cc-by-sa-4.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: cl-tohoku
config: null
id: cl-tohoku/bert-base-japanese-whole-word-masking
downloads: 632,322
likes: 15
library_name: transformers
readme, then embedding:
--- language: ja license: cc-by-sa-4.0 datasets: - wikipedia widget: - text: 東北大学で[MASK]の研究をしています。 --- # BERT base Japanese (IPA dictionary, whole word masking enabled) This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language. This version of the model processes in...
[ -0.10098788142204285, -0.09673559665679932, 0.04363148659467697, 0.021340372040867805, -0.01171626802533865, 0.08598610013723373, 0.013818634673953056, 0.022945279255509377, 0.01243932917714119, -0.0005057408707216382, 0.04235122352838516, -0.03291153535246849, 0.048775240778923035, 0.0368...

modelId: facebook/bart-base
sha: 84358834e73de6a82c22cec1d90eb45ef4f6eba5
lastModified: 2022-06-03T09:43:53.000Z
tags:
[ "pytorch", "tf", "jax", "bart", "feature-extraction", "en", "arxiv:1910.13461", "transformers", "license:apache-2.0" ]
pipeline_tag: feature-extraction
private: false
author: facebook
config: null
id: facebook/bart-base
downloads: 624,921
likes: 18
library_name: transformers
readme, then embedding:
--- license: apache-2.0 language: en --- # BART (base-sized model) BART model pre-trained on English language. It was introduced in the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Lewis et al. and first...
[ -0.11465750634670258, -0.06656375527381897, 0.05786983296275139, 0.008724099956452847, -0.06584079563617706, 0.04901890456676483, -0.044143904000520706, 0.008671154268085957, 0.048350218683481216, -0.06431283056735992, 0.006088439840823412, -0.013792728073894978, 0.01790396310389042, -0.00...

modelId: digitalepidemiologylab/covid-twitter-bert
sha: 945b4ea68241df3ccb8554cd1927ba81d2c9ecaa
lastModified: 2021-05-19T15:52:48.000Z
tags:
[ "pytorch", "tf", "jax", "bert", "en", "transformers", "Twitter", "COVID-19", "license:mit" ]
pipeline_tag: null
private: false
author: digitalepidemiologylab
config: null
id: digitalepidemiologylab/covid-twitter-bert
downloads: 608,689
likes: null
library_name: transformers
readme, then embedding:
--- language: "en" thumbnail: "https://raw.githubusercontent.com/digitalepidemiologylab/covid-twitter-bert/master/images/COVID-Twitter-BERT_small.png" tags: - Twitter - COVID-19 license: mit --- # COVID-Twitter-BERT (CT-BERT) v1 :warning: _You may want to use the [v2 model](https://huggingface.co/digitalepidemiologyl...
[ -0.08992455899715424, -0.022449184209108353, 0.01900365948677063, 0.04468945413827896, 0.07851923257112503, 0.037883076816797256, 0.0014895728090777993, 0.06199873238801956, 0.026359664276242256, 0.0018013465451076627, 0.03541679307818413, -0.026219435036182404, 0.01637023128569126, 0.0679...

modelId: microsoft/layoutlm-base-uncased
sha: ca841ce8d2f46b13b0ac3f635b8eb7d2e1d758d5
lastModified: 2021-08-11T05:27:42.000Z
tags:
[ "pytorch", "tf", "layoutlm", "arxiv:1912.13318", "transformers" ]
pipeline_tag: null
private: false
author: microsoft
config: null
id: microsoft/layoutlm-base-uncased
downloads: 604,081
likes: 8
library_name: transformers
readme, then embedding:
# LayoutLM **Multimodal (text + layout/format + image) pre-training for document AI** [Microsoft Document AI](https://www.microsoft.com/en-us/research/project/document-ai/) | [GitHub](https://aka.ms/layoutlm) ## Model description LayoutLM is a simple but effective pre-training method of text and layout for document ...
[ -0.0346904881298542, 0.038127411156892776, 0.010263904929161072, 0.01670531928539276, 0.01803665980696678, 0.023607727140188217, -0.04285275936126709, 0.007251023314893246, -0.00320497527718544, -0.02575642429292202, 0.06485345214605331, 0.030597299337387085, 0.04433407634496689, 0.0214996...

modelId: xlnet-base-cased
sha: 593a21e8b79948a7f952811aa44f37d76e23d586
lastModified: 2021-09-16T09:43:58.000Z
tags:
[ "pytorch", "tf", "rust", "xlnet", "text-generation", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1906.08237", "transformers", "license:mit" ]
pipeline_tag: text-generation
private: false
author: null
config: null
id: xlnet-base-cased
downloads: 599,543
likes: 5
library_name: transformers
readme, then embedding:
--- language: en license: mit datasets: - bookcorpus - wikipedia --- # XLNet (base-sized model) XLNet model pre-trained on English language. It was introduced in the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Yang et al. and first released in...
[ -0.087545245885849, -0.06660180538892746, 0.03745153546333313, -0.008844469673931599, 0.03226645663380623, 0.11173182725906372, -0.03159862011671066, 0.03530389070510864, 0.03614726662635803, -0.05470224469900131, -0.031718652695417404, -0.008410460315644741, 0.06573323905467987, 0.0371775...

modelId: distilbert-base-multilingual-cased
sha: 6045845d9e2b056487062a98a902d8304d76441f
lastModified: 2022-07-22T08:13:03.000Z
tags:
[ "pytorch", "tf", "distilbert", "fill-mask", "multilingual", "dataset:wikipedia", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
pipeline_tag: fill-mask
private: false
author: null
config: null
id: distilbert-base-multilingual-cased
downloads: 585,365
likes: 16
library_name: transformers
readme, then embedding:
--- language: multilingual license: apache-2.0 datasets: - wikipedia --- # Model Card for DistilBERT base multilingual (cased) # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation...
[ -0.07365205883979797, -0.09448935091495514, 0.08372806012630463, -0.0029499151278287172, 0.018073782324790955, -0.005491136107593775, -0.01240010466426611, 0.040871065109968185, 0.00942261517047882, -0.06830482184886932, -0.0061553907580673695, -0.03271116688847542, 0.03827552869915962, 0....

modelId: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
sha: b8ef00830037f9868450f778081ea683e900fe39
lastModified: 2022-06-15T18:43:00.000Z
tags:
[ "pytorch", "tf", "bert", "feature-extraction", "multilingual", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
pipeline_tag: sentence-similarity
private: false
author: sentence-transformers
config: null
id: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
downloads: 584,527
likes: 43
library_name: sentence-transformers
readme, then embedding:
--- pipeline_tag: sentence-similarity language: multilingual license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences &...
[ -0.032052427530288696, -0.05750957503914833, -0.018938865512609482, 0.03187490254640579, 0.009913748130202293, 0.06325744837522507, -0.05041438341140747, 0.03558629751205444, 0.016923602670431137, -0.07797453552484512, 0.04905039444565773, -0.0212862528860569, 0.04814773052930832, 0.055095...
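The record above describes a multilingual sentence-similarity model; a minimal usage sketch, assuming the `sentence-transformers` package is installed (the example sentences are illustrative, not from the card):

```python
from sentence_transformers import SentenceTransformer

# Load the multilingual paraphrase model named in the record above.
model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

sentences = ["This is an example sentence", "Dies ist ein Beispielsatz"]
embeddings = model.encode(sentences)  # one dense vector per sentence
print(embeddings.shape)
```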
bhadresh-savani/distilbert-base-uncased-emotion
322caf2a56793969b8221b87bed988f8e7798b8e
2022-07-06T10:43:55.000Z
[ "pytorch", "tf", "jax", "distilbert", "text-classification", "en", "dataset:emotion", "arxiv:1910.01108", "transformers", "emotion", "license:apache-2.0", "model-index" ]
text-classification
false
bhadresh-savani
null
bhadresh-savani/distilbert-base-uncased-emotion
564,284
37
transformers
--- language: - en thumbnail: https://avatars3.githubusercontent.com/u/32437151?s=460&u=4ec59abc8d21d5feea3dab323d23a5860e6996a4&v=4 tags: - text-classification - emotion - pytorch license: apache-2.0 datasets: - emotion metrics: - Accuracy, F1 Score model-index: - name: bhadresh-savani/distilbert-base-uncased-emotion ...
[ -0.04636714607477188, -0.04246510565280914, -0.041032660752534866, 0.05817602202296257, 0.09009232372045517, 0.0073073990643024445, -0.012772567570209503, 0.03918372094631195, -0.016593430191278458, -0.07036647200584412, 0.025588108226656914, -0.0977468490600586, -0.010455331765115261, -0....
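A minimal sketch of running the emotion classifier above through the `transformers` pipeline API; the input sentence is illustrative, and `top_k=None` assumes a recent `transformers` release (older ones use `return_all_scores=True`):

```python
from transformers import pipeline

# Emotion classifier from the record above; top_k=None returns scores for all labels.
classifier = pipeline("text-classification",
                      model="bhadresh-savani/distilbert-base-uncased-emotion",
                      top_k=None)
print(classifier("I love using transformers!"))
```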
sentence-transformers/bert-base-nli-mean-tokens
18fc720063106176044380e71bad038d01e821d1
2022-06-09T12:34:28.000Z
[ "pytorch", "tf", "jax", "rust", "bert", "feature-extraction", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
false
sentence-transformers
null
sentence-transformers/bert-base-nli-mean-tokens
528,903
9
sentence-transformers
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers license: apache-2.0 --- **⚠️ This model is deprecated. Please don't use it as it produces sentence embeddings of low quality. You can find recommended sentence embedding models here: [SBERT.net ...
[ -0.07522783428430557, -0.06903247535228729, 0.037927139550447464, 0.030251873657107353, 0.011466043069958687, 0.08804690837860107, -0.029127534478902817, 0.07562512904405594, 0.016049973666667938, -0.07408270239830017, 0.026865346357226372, 0.006537787150591612, 0.04384155943989754, 0.0843...
deepset/minilm-uncased-squad2
2f66fe86fb8a3df5b7b07c214a3d33b31d5a133c
2022-07-25T14:34:52.000Z
[ "pytorch", "jax", "bert", "question-answering", "en", "dataset:squad_v2", "transformers", "license:cc-by-4.0", "model-index", "autotrain_compatible" ]
question-answering
false
deepset
null
deepset/minilm-uncased-squad2
515,791
8
transformers
--- language: en datasets: - squad_v2 license: cc-by-4.0 model-index: - name: deepset/minilm-uncased-squad2 results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squad_v2 split: validation metrics: - name: Ex...
[ -0.08635865896940231, -0.00962852779775858, -0.02705409564077854, 0.026123594492673874, 0.066559799015522, 0.020438177511096, -0.033974580466747284, 0.004470542073249817, -0.014128177426755428, -0.025917278602719307, 0.039885587990283966, -0.13294433057308197, -0.011967284604907036, 0.0258...
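A minimal extractive-QA sketch for the SQuAD2-tuned MiniLM above; the question and context are illustrative placeholders:

```python
from transformers import pipeline

# Extractive question answering: the answer span is selected from the context.
qa = pipeline("question-answering", model="deepset/minilm-uncased-squad2")
result = qa(question="Why is model conversion important?",
            context="Model conversion gives freedom to the user and lets "
                    "people switch between frameworks easily.")
print(result["answer"], result["score"])
```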
gpt2-medium
8c7ca69f9d24f64c9f3540f9c416d99e16275828
2022-07-22T08:01:16.000Z
[ "pytorch", "tf", "jax", "rust", "gpt2", "text-generation", "en", "arxiv:1910.09700", "transformers", "license:mit" ]
text-generation
false
null
null
gpt2-medium
515,318
4
transformers
--- language: en license: mit --- # GPT-2 Medium ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Envi...
[ -0.042659010738134384, -0.022365229204297066, -0.024635720998048782, 0.016201013699173927, 0.12279670685529709, -0.010375344194471836, 0.01589481718838215, 0.08776478469371796, 0.026824552565813065, -0.04380200430750847, -0.043144773691892624, 0.02396426536142826, 0.035122230648994446, 0.0...
pysentimiento/robertuito-sentiment-analysis
e3be95c8efad7f480ce8aab2221188ecb78e40f3
2022-06-23T13:01:10.000Z
[ "pytorch", "tf", "roberta", "text-classification", "es", "arxiv:2106.09462", "arxiv:2111.09453", "transformers", "twitter", "sentiment-analysis" ]
text-classification
false
pysentimiento
null
pysentimiento/robertuito-sentiment-analysis
506,297
9
transformers
--- language: - es tags: - twitter - sentiment-analysis --- # Sentiment Analysis in Spanish ## robertuito-sentiment-analysis Repository: [https://github.com/pysentimiento/pysentimiento/](https://github.com/pysentimiento/pysentimiento/) Model trained with the TASS 2020 corpus (around 5k tweets) of several dial...
[ -0.04791926220059395, -0.0507703460752964, -0.016948174685239792, 0.021370219066739082, 0.013451650738716125, 0.046086281538009644, 0.04978032782673836, 0.03664499521255493, 0.06612519919872284, -0.015097561292350292, 0.059976547956466675, -0.009501159191131592, 0.012026981450617313, 0.041...
Helsinki-NLP/opus-mt-fr-en
967b0840416a86ccf02573c8fedf9dd0e0b42fd6
2021-09-09T21:53:38.000Z
[ "pytorch", "jax", "marian", "text2text-generation", "fr", "en", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
false
Helsinki-NLP
null
Helsinki-NLP/opus-mt-fr-en
490,737
5
transformers
--- tags: - translation license: apache-2.0 --- ### opus-mt-fr-en * source languages: fr * target languages: en * OPUS readme: [fr-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/fr-en/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * downl...
[ -0.05996047332882881, -0.024237744510173798, 0.02489008568227291, -0.010470354929566383, 0.012844543904066086, 0.10177662968635559, -0.05968740954995155, 0.037714309990406036, 0.029419874772429466, -0.011656415648758411, 0.007984437979757786, -0.04585428163409233, -0.07401059567928314, -0....
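A minimal French-to-English translation sketch for the Marian checkpoint above, assuming the `transformers` and `sentencepiece` packages are installed; the input sentence is illustrative:

```python
from transformers import MarianMTModel, MarianTokenizer

name = "Helsinki-NLP/opus-mt-fr-en"
tokenizer = MarianTokenizer.from_pretrained(name)
model = MarianMTModel.from_pretrained(name)

# Translate a batch of French sentences to English.
batch = tokenizer(["Bonjour, comment allez-vous ?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```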
hfl/chinese-roberta-wwm-ext
5c58d0b8ec1d9014354d691c538661bf00bfdb44
2022-03-01T09:13:56.000Z
[ "pytorch", "tf", "jax", "bert", "fill-mask", "zh", "arxiv:1906.08101", "arxiv:2004.13922", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
hfl
null
hfl/chinese-roberta-wwm-ext
485,950
51
transformers
--- language: - zh tags: - bert license: "apache-2.0" --- # Please use 'Bert' related functions to load this model! ## Chinese BERT with Whole Word Masking For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**. **[Pre-Training with Whole Word ...
[ -0.13892720639705658, -0.03527410328388214, 0.06445440649986267, 0.030523041263222694, 0.0028055051807314157, 0.012470254674553871, 0.0003026134509127587, -0.0201879795640707, -0.03373470902442932, -0.0031854051630944014, 0.07240064442157745, -0.05555472522974014, 0.039492979645729065, 0.0...
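The card above explicitly asks for BERT-class loaders despite the "roberta" in the model name; a minimal sketch following that instruction:

```python
from transformers import BertModel, BertTokenizer

# Per the card: use BERT-related functions, not RoBERTa ones, to load this model.
tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext")
```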
google/electra-small-discriminator
153f486d928bcfc213932f8fc91fc2e3c41af769
2021-04-29T15:24:16.000Z
[ "pytorch", "tf", "jax", "electra", "pretraining", "en", "transformers", "license:apache-2.0" ]
null
false
google
null
google/electra-small-discriminator
482,240
5
transformers
--- language: en thumbnail: https://huggingface.co/front/thumbnails/google.png license: apache-2.0 --- ## ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators **ELECTRA** is a new method for self-supervised language representation learning. It can be used to pre-train transformer networks usi...
[ -0.15659932792186737, -0.013592531904578209, -0.03288592770695686, 0.026664843782782555, 0.05259827896952629, 0.01947689615190029, 0.018389219418168068, -0.03876601532101631, 0.011088617146015167, -0.05713406205177307, 0.024072255939245224, -0.0444607250392437, 0.024075860157608986, 0.0325...
microsoft/layoutlmv2-base-uncased
5c1ca07c23780c6dc123807def206ae9c4d59aca
2021-12-23T12:52:53.000Z
[ "pytorch", "layoutlmv2", "en", "arxiv:2012.14740", "transformers", "license:cc-by-nc-sa-4.0" ]
null
false
microsoft
null
microsoft/layoutlmv2-base-uncased
477,930
18
transformers
--- language: en license: cc-by-nc-sa-4.0 --- # LayoutLMv2 **Multimodal (text + layout/format + image) pre-training for document AI** The documentation of this model in the Transformers library can be found [here](https://huggingface.co/docs/transformers/model_doc/layoutlmv2). [Microsoft Document AI](https://www.mi...
[ -0.10699433833360672, -0.03563679754734039, -0.03966660425066948, 0.033492639660835266, 0.09648266434669495, 0.009470691904425621, -0.06989522278308868, 0.03577287495136261, 0.02407202310860157, 0.007983296178281307, -0.010976324789226055, 0.019300684332847595, 0.0484066978096962, 0.100375...
klue/bert-base
812449f1a6bc736e693db7aa0e513e5e90795a62
2021-10-20T15:23:59.000Z
[ "pytorch", "bert", "fill-mask", "ko", "arxiv:2105.09680", "transformers", "korean", "klue", "autotrain_compatible" ]
fill-mask
false
klue
null
klue/bert-base
461,579
7
transformers
--- language: ko tags: - korean - klue mask_token: "[MASK]" widget: - text: 대한민국의 수도는 [MASK] 입니다. --- # KLUE BERT base Pretrained BERT Model on Korean Language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details. ## How to use ```python from tra...
[ -0.16914959251880646, 0.012892910279333591, 0.08553741127252579, 0.006892658770084381, -0.03603089600801468, 0.03273778408765793, 0.027492238208651543, 0.04780479893088341, 0.0014062360860407352, -0.03538554161787033, 0.0029013738967478275, -0.06837352365255356, 0.033635981380939484, 0.067...
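The card's own "How to use" snippet is cut off above; a minimal fill-mask sketch reusing the card's widget text, with the pipeline API standing in for whatever the truncated snippet showed:

```python
from transformers import pipeline

# Fill-mask with KLUE BERT base, using the widget sentence from the card above.
fill = pipeline("fill-mask", model="klue/bert-base")
for candidate in fill("대한민국의 수도는 [MASK] 입니다."):
    print(candidate["token_str"], candidate["score"])
```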
sentence-transformers/distilbert-base-nli-mean-tokens
683b927b0b0f77e70b9a7d15f7f7601a515925a9
2022-06-15T19:35:42.000Z
[ "pytorch", "tf", "distilbert", "feature-extraction", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
feature-extraction
false
sentence-transformers
null
sentence-transformers/distilbert-base-nli-mean-tokens
454,847
null
sentence-transformers
--- pipeline_tag: feature-extraction license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- **⚠️ This model is deprecated. Please don't use it as it produces sentence embeddings of low quality. You can find recommended sentence embedding models here: [SBERT.net ...
[ -0.05753064900636673, -0.07321155071258545, 0.01931511051952839, 0.0479629822075367, 0.02940313331782818, 0.07246044278144836, -0.03149522468447685, 0.06701167672872543, 0.003295734291896224, -0.0997343510389328, 0.0376710519194603, 0.01934780180454254, 0.04845025762915611, 0.0624055489897...
sshleifer/distilbart-cnn-12-6
a4f8f3ea906ed274767e9906dbaede7531d660ff
2021-06-14T07:51:12.000Z
[ "pytorch", "jax", "rust", "bart", "text2text-generation", "en", "dataset:cnn_dailymail", "dataset:xsum", "transformers", "summarization", "license:apache-2.0", "autotrain_compatible" ]
summarization
false
sshleifer
null
sshleifer/distilbart-cnn-12-6
452,231
57
transformers
--- language: en tags: - summarization license: apache-2.0 datasets: - cnn_dailymail - xsum thumbnail: https://huggingface.co/front/thumbnails/distilbart_medium.png --- ### Usage This checkpoint should be loaded into `BartForConditionalGeneration.from_pretrained`. See the [BART docs](https://huggingface.co/transforme...
[ -0.10242787003517151, -0.08055665343999863, 0.058801449835300446, 0.001729630515910685, -0.01020839437842369, -0.020107010379433632, -0.09146951138973236, 0.055804893374443054, -0.03275766223669052, -0.0814155638217926, 0.047091152518987656, -0.024555958807468414, 0.02256167307496071, -0.0...
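The card instructs loading this checkpoint into `BartForConditionalGeneration.from_pretrained`; a minimal summarization sketch under that instruction (the article text is a placeholder, and the generation settings are illustrative):

```python
from transformers import BartForConditionalGeneration, BartTokenizer

name = "sshleifer/distilbart-cnn-12-6"
tokenizer = BartTokenizer.from_pretrained(name)
model = BartForConditionalGeneration.from_pretrained(name)  # as the card instructs

article = "Replace this placeholder with the article you want summarized."
inputs = tokenizer(article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, num_beams=4, max_length=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```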
SEBIS/code_trans_t5_small_program_synthese_transfer_learning_finetune
cf3d414acf70f8f8e68108a2efde164b129e6bfa
2022-06-27T20:56:39.000Z
[ "pytorch", "tf", "jax", "t5", "feature-extraction", "arxiv:2104.02443", "arxiv:1910.09700", "arxiv:2105.09680", "transformers", "summarization" ]
summarization
false
SEBIS
null
SEBIS/code_trans_t5_small_program_synthese_transfer_learning_finetune
443,061
5
transformers
--- tags: - summarization widget: - text: "you are given an array of numbers a and a number b , compute the difference of elements in a and b" --- # CodeTrans model for program synthesis ## Table of Contents - [Model Details](#model-details) - [How to Get Started With the Model](#how-to-get-started-with-the-model) ...
[ -0.09398753941059113, 0.00023620222054887563, -0.07200116664171219, -0.025018131360411644, 0.06995216757059097, 0.0002894390490837395, 0.04121631383895874, 0.018724948167800903, -0.0070031145587563515, -0.036057427525520325, -0.012298164889216423, -0.0036895424127578735, 0.04473763704299927,...
sentence-transformers/distiluse-base-multilingual-cased-v2
896fbacdabde59de4cb8d75dea7b9bff6066015c
2022-06-15T19:24:30.000Z
[ "pytorch", "tf", "distilbert", "feature-extraction", "multilingual", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
false
sentence-transformers
null
sentence-transformers/distiluse-base-multilingual-cased-v2
437,878
18
sentence-transformers
--- pipeline_tag: sentence-similarity language: multilingual license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/distiluse-base-multilingual-cased-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & ...
[ -0.045195434242486954, -0.058382563292980194, -0.0042662229388952255, 0.020876796916127205, -0.006434501614421606, 0.029410913586616516, -0.0583835169672966, 0.02599978633224964, 0.029764804989099503, -0.08146019279956818, 0.035641223192214966, -0.04075738042593002, 0.05016973987221718, 0....
sentence-transformers/paraphrase-xlm-r-multilingual-v1
50f7fa9e273db3db51beceacc1b111e4a1a31d34
2022-06-15T19:25:39.000Z
[ "pytorch", "tf", "xlm-roberta", "feature-extraction", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
false
sentence-transformers
null
sentence-transformers/paraphrase-xlm-r-multilingual-v1
434,789
31
sentence-transformers
--- pipeline_tag: sentence-similarity license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/paraphrase-xlm-r-multilingual-v1 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensi...
[ -0.042247891426086426, -0.06216457113623619, -0.023620810359716415, 0.01813187263906002, 0.022390039637684822, 0.060496531426906586, -0.04595527797937393, 0.03595336526632309, 0.015518763102591038, -0.07833635061979294, 0.04575902596116066, -0.03354158625006676, 0.060676004737615585, 0.043...
camembert-base
3f452b6e5a89b0e6c828c9bba2642bc577086eae
2022-07-22T08:12:31.000Z
[ "pytorch", "tf", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
null
null
camembert-base
431,334
16
transformers
--- language: fr license: mit datasets: - oscar --- # CamemBERT: a Tasty French Language Model ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Citation Information](#citation-...
[ -0.08638779073953629, -0.06763307005167007, -0.025833409279584885, -0.027342049404978752, 0.03764456510543823, 0.12339358776807785, -0.02534555271267891, 0.11847586929798126, 0.08251497894525528, -0.004934415686875582, -0.004848347045481205, -0.05448513105511665, 0.059380196034908295, 0.00...
nlptown/bert-base-multilingual-uncased-sentiment
e06857fdb0325a7798a8fc361b417dfeec3a3b98
2022-04-18T16:46:13.000Z
[ "pytorch", "tf", "jax", "bert", "text-classification", "en", "nl", "de", "fr", "it", "es", "transformers", "license:mit" ]
text-classification
false
nlptown
null
nlptown/bert-base-multilingual-uncased-sentiment
429,449
57
transformers
--- language: - en - nl - de - fr - it - es license: mit --- # bert-base-multilingual-uncased-sentiment This is a bert-base-multilingual-uncased model finetuned for sentiment analysis on product reviews in six languages: English, Dutch, German, French, Spanish and Italian. It predicts the sentiment of the review as a n...
[ -0.08215174823999405, -0.09198115020990372, -0.0035543793346732855, 0.012485884130001068, 0.013683075085282326, 0.09545306861400604, 0.04701993986964226, 0.052623968571424484, 0.01546592079102993, -0.021794479340314865, 0.0045181754976511, -0.04780310392379761, 0.0417378768324852, 0.032698...
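A minimal sketch of the star-rating prediction described above; the review text is illustrative:

```python
from transformers import pipeline

# Predicts a 1-5 star rating for product reviews in six languages.
stars = pipeline("text-classification",
                 model="nlptown/bert-base-multilingual-uncased-sentiment")
print(stars("This product exceeded my expectations."))  # e.g. label "5 stars"
```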
Hate-speech-CNERG/indic-abusive-allInOne-MuRIL
159b3636af636844106d203e3d8a07f522aaa6e0
2022-05-03T08:49:47.000Z
[ "pytorch", "bert", "text-classification", "bn", "hi", "hi-en", "ka-en", "ma-en", "mr", "ta-en", "ur", "ur-en", "en", "arxiv:2204.12543", "transformers", "license:afl-3.0" ]
text-classification
false
Hate-speech-CNERG
null
Hate-speech-CNERG/indic-abusive-allInOne-MuRIL
425,203
null
transformers
--- language: [bn, hi, hi-en, ka-en, ma-en, mr, ta-en, ur, ur-en, en] license: afl-3.0 --- This model is used for detecting **abusive speech** in **Bengali, Devanagari Hindi, Code-mixed Hindi, Code-mixed Kannada, Code-mixed Malayalam, Marathi, Code-mixed Tamil, Urdu, Code-mixed Urdu, and English languages**. The allInOne ...
[ -0.062019575387239456, -0.04452022537589073, -0.013807946816086769, -0.004274517763406038, 0.05298544839024544, 0.007064873352646828, -0.00453414861112833, -0.05843058228492737, -0.012413380667567253, -0.06542504578828812, 0.02530588209629059, -0.07623746991157532, 0.04151010140776634, 0.0...
yiyanghkust/finbert-tone
69507fb7dad65fd5ee96679690e6336211edc7a5
2022-06-09T12:05:27.000Z
[ "pytorch", "tf", "text-classification", "en", "transformers", "financial-sentiment-analysis", "sentiment-analysis" ]
text-classification
false
yiyanghkust
null
yiyanghkust/finbert-tone
415,031
22
transformers
--- language: "en" tags: - financial-sentiment-analysis - sentiment-analysis widget: - text: "growth is strong and we have plenty of liquidity" --- `FinBERT` is a BERT model pre-trained on financial communication text. The purpose is to enhance financial NLP research and practice. It is trained on the following three ...
[ -0.0377039760351181, -0.04703419655561447, -0.019877271726727486, 0.012331523932516575, 0.030871499329805374, 0.05449178069829941, 0.09381328523159027, 0.03868062421679497, 0.07091400027275085, -0.053262364119291306, -0.01170447375625372, 0.027527792379260063, -0.03912686929106712, 0.01071...
bert-large-uncased-whole-word-masking-finetuned-squad
242d9dbb66bb5033025196d5678907307f8fb098
2021-05-18T16:35:27.000Z
[ "pytorch", "tf", "jax", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible" ]
question-answering
false
null
null
bert-large-uncased-whole-word-masking-finetuned-squad
413,010
23
transformers
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (uncased) whole word masking finetuned on SQuAD Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released i...
[ -0.075973741710186, -0.10039574652910233, 0.06402284651994705, 0.03697829693555832, 0.010584109462797642, 0.02961229346692562, 0.01904246397316456, 0.008290208876132965, 0.02470485121011734, -0.013798780739307404, 0.045710984617471695, -0.003953421488404274, 0.062075424939394, 0.0243134088...
j-hartmann/emotion-english-distilroberta-base
d23807173703d44b48d60ca252664f60d0d46563
2022-06-09T12:43:53.000Z
[ "pytorch", "tf", "roberta", "text-classification", "en", "transformers", "distilroberta", "sentiment", "emotion", "twitter", "reddit" ]
text-classification
false
j-hartmann
null
j-hartmann/emotion-english-distilroberta-base
406,862
31
transformers
--- language: "en" tags: - distilroberta - sentiment - emotion - twitter - reddit widget: - text: "Oh wow. I didn't know that." - text: "This movie always makes me cry.." - text: "Oh Happy Day" --- # Emotion English DistilRoBERTa-base # Description ℹ With this model, you can classify emotions in English text data....
[ -0.030246220529079437, -0.05355704948306084, 0.05665166303515434, 0.03357546031475067, 0.04713347926735878, -0.0002391643647570163, 0.016336070373654366, -0.0035251695662736893, 0.0731247067451477, -0.05102284625172615, 0.04216110333800316, -0.07010572403669357, 0.02198854461312294, -0.011...
sentence-transformers/multi-qa-mpnet-base-dot-v1
69cf9082c6abd4f70bdf8fca0ca826b6b5d16ebc
2022-07-11T21:02:59.000Z
[ "pytorch", "mpnet", "fill-mask", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:ms_marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:search_qa", "dataset:eli5", "dataset:natural_questions", "dataset:trivia_qa", "dataset:embedding-data/QQP", "dataset:embedding-...
sentence-similarity
false
sentence-transformers
null
sentence-transformers/multi-qa-mpnet-base-dot-v1
398,918
9
sentence-transformers
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity datasets: - flax-sentence-embeddings/stackexchange_xml - ms_marco - gooaq - yahoo_answers_topics - search_qa - eli5 - natural_questions - trivia_qa - embedding-data/QQP - embedding-data/PAQ_pairs - embedding-d...
[ -0.03549480810761452, -0.061646245419979095, -0.019519338384270668, 0.03635980188846588, 0.04563184082508087, 0.07743945717811584, -0.031045420095324516, 0.021972540766000748, -0.0001432829740224406, -0.05707089230418205, 0.022465497255325317, -0.011211416684091091, 0.0628516674041748, 0.0...
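A minimal semantic-search sketch for the multi-qa model above; the "dot" in the checkpoint name suggests dot-product scoring, which is the assumption here, and the query/documents are illustrative:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")

# Embed a query and candidate passages, then rank by dot-product similarity.
query_emb = model.encode("How many people live in London?")
doc_emb = model.encode(["Around 9 million people live in London.",
                        "London is known for its financial district."])
print(util.dot_score(query_emb, doc_emb))
```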
openai/clip-vit-large-patch14
0993c71e8ad62658387de2714a69f723ddfffacb
2022-03-14T18:01:04.000Z
[ "pytorch", "tf", "jax", "clip", "feature-extraction", "arxiv:2103.00020", "arxiv:1908.04913", "transformers", "vision" ]
feature-extraction
false
openai
null
openai/clip-vit-large-patch14
393,559
3
transformers
--- tags: - vision --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robu...
[ -0.06138596311211586, -0.0320110097527504, -0.0014462426770478487, -0.017546001821756363, 0.08810905367136002, -0.02513975277543068, -0.029011132195591927, 0.05683789402246475, 0.0727316364645958, -0.09636805206537247, 0.03555210307240486, -0.008171260356903076, -0.003876835573464632, 0.04...
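A minimal zero-shot image-text matching sketch for the CLIP checkpoint above, assuming `Pillow` and `requests` are available; the COCO image URL and text labels are illustrative:

```python
import requests
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

# Score one image against free-form text labels.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"],
                   images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
print(outputs.logits_per_image.softmax(dim=1))  # label probabilities for the image
```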
valhalla/distilbart-mnli-12-1
506336d4214470e3b3b36021358daae28e25ceac
2021-06-14T10:27:55.000Z
[ "pytorch", "jax", "bart", "text-classification", "dataset:mnli", "transformers", "distilbart", "distilbart-mnli", "zero-shot-classification" ]
zero-shot-classification
false
valhalla
null
valhalla/distilbart-mnli-12-1
389,752
10
transformers
--- datasets: - mnli tags: - distilbart - distilbart-mnli pipeline_tag: zero-shot-classification --- # DistilBart-MNLI distilbart-mnli is the distilled version of bart-large-mnli created using the **No Teacher Distillation** technique proposed for BART summarisation by Hugging Face, [here](https://github.com/huggingfa...
[ -0.0767815113067627, -0.005045149940997362, 0.10137208551168442, -0.0044491710141301155, -0.08543753623962402, -0.054864756762981415, -0.06274072825908661, 0.012675730511546135, -0.03593569993972778, -0.08640600740909576, 0.03498023375868797, -0.07564754039049149, 0.0690762847661972, -0.06...
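A minimal zero-shot classification sketch for the distilled MNLI model above; the input sentence and candidate labels are illustrative:

```python
from transformers import pipeline

# Zero-shot classification: candidate labels are scored as NLI hypotheses.
nli = pipeline("zero-shot-classification", model="valhalla/distilbart-mnli-12-1")
print(nli("I have a problem with my iphone that needs to be resolved asap!",
          candidate_labels=["urgent", "not urgent", "phone", "computer"]))
```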
dmis-lab/biosyn-sapbert-bc5cdr-disease
53d4525fccf15663f19f0d0846c50286a0a01f1e
2021-10-25T14:46:40.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
dmis-lab
null
dmis-lab/biosyn-sapbert-bc5cdr-disease
378,648
1
transformers
Entry not found
[ 0.0461147278547287, -0.038838207721710205, -0.01049656979739666, -0.03682169318199158, 0.011261860840022564, 0.013094935566186905, 0.0019101888174191117, -0.013979103416204453, 0.027092741802334785, -0.015212527476251125, 0.017284274101257324, -0.08189476281404495, 0.03817418962717056, -0....
dmis-lab/biosyn-sapbert-bc5cdr-chemical
f9b9daf740698ac427bb6532fd456fc18bccdd80
2021-10-25T14:47:09.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
dmis-lab
null
dmis-lab/biosyn-sapbert-bc5cdr-chemical
373,119
null
transformers
Entry not found
[ 0.0461147278547287, -0.038838207721710205, -0.01049656979739666, -0.03682169318199158, 0.011261860840022564, 0.013094935566186905, 0.0019101888174191117, -0.013979103416204453, 0.027092741802334785, -0.015212527476251125, 0.017284274101257324, -0.08189476281404495, 0.03817418962717056, -0....
allenai/scibert_scivocab_uncased
2ab156b969f2dbbd7ecc0080b78bc2cd272c4092
2021-05-19T11:41:40.000Z
[ "pytorch", "jax", "bert", "transformers" ]
null
false
allenai
null
allenai/scibert_scivocab_uncased
369,675
21
transformers
# SciBERT This is the pretrained model presented in [SciBERT: A Pretrained Language Model for Scientific Text](https://www.aclweb.org/anthology/D19-1371/), which is a BERT model trained on scientific text. The training corpus was papers taken from [Semantic Scholar](https://www.semanticscholar.org). Corpus size is 1....
[ -0.06226271763443947, -0.03768712654709816, 0.038678526878356934, 0.012727038934826851, 0.007553716655820608, 0.03183666244149208, -0.033963095396757126, 0.052286069840192795, 0.0109660429880023, -0.00013924833911005408, 0.031278062611818314, 0.026570413261651993, 0.033382706344127655, 0.0...
hfl/chinese-bert-wwm-ext
2a995a880017c60e4683869e817130d8af548486
2021-05-19T19:06:39.000Z
[ "pytorch", "tf", "jax", "bert", "fill-mask", "zh", "arxiv:1906.08101", "arxiv:2004.13922", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
hfl
null
hfl/chinese-bert-wwm-ext
368,889
26
transformers
--- language: - zh license: "apache-2.0" --- ## Chinese BERT with Whole Word Masking For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**. **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)** Yiming Cu...
[ -0.13572391867637634, -0.0475379079580307, 0.08086962252855301, 0.03534784913063049, -0.030565138906240463, 0.011088831350207329, -0.00047960912343114614, -0.031159551814198494, -0.03220145404338837, 0.007688344921916723, 0.046967726200819016, -0.040497053414583206, 0.02268795855343342, 0....
mrm8488/t5-base-finetuned-common_gen
5c3010b4532b7834039c65580e688e9656626835
2021-09-24T08:52:57.000Z
[ "pytorch", "t5", "text2text-generation", "en", "dataset:common_gen", "arxiv:1910.10683", "arxiv:1911.03705", "transformers", "common sense", "autotrain_compatible" ]
text2text-generation
false
mrm8488
null
mrm8488/t5-base-finetuned-common_gen
362,815
6
transformers
--- language: en tags: - common sense datasets: - common_gen widget: - text: "tree plant ground hole dig" --- # T5-base fine-tuned on CommonGen [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) fine-tuned on [CommonGen](https://inklab.usc.edu/CommonGen/index.html) for *Generati...
[ -0.09631180018186569, -0.06494303047657013, 0.10465230792760849, 0.025842756032943726, 0.10590454190969467, -0.018765488639473915, -0.0005120787536725402, 0.00932965986430645, -0.0034256186336278915, 0.03443259373307228, -0.026334110647439957, -0.00806945189833641, 0.02498563751578331, 0.0...
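A minimal generative-commonsense sketch reusing the card's widget text above; feeding the raw concept list with no task prefix is an assumption based on that widget:

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

name = "mrm8488/t5-base-finetuned-common_gen"
tokenizer = T5Tokenizer.from_pretrained(name)
model = T5ForConditionalGeneration.from_pretrained(name)

# The card's widget passes the concept set straight in as plain text.
inputs = tokenizer("tree plant ground hole dig", return_tensors="pt")
ids = model.generate(**inputs, max_length=32)
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```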
emilyalsentzer/Bio_ClinicalBERT
41943bf7f983007123c758373c5246305cc536ec
2022-02-27T13:59:10.000Z
[ "pytorch", "jax", "bert", "en", "arxiv:1904.03323", "arxiv:1901.08746", "transformers", "fill-mask", "license:mit" ]
fill-mask
false
emilyalsentzer
null
emilyalsentzer/Bio_ClinicalBERT
360,523
31
transformers
--- language: "en" tags: - fill-mask license: mit --- # ClinicalBERT - Bio + Clinical BERT Model The [Publicly Available Clinical BERT Embeddings](https://arxiv.org/abs/1904.03323) paper contains four unique clinicalBERT models: initialized with BERT-Base (`cased_L-12_H-768_A-12`) or BioBERT (`BioBERT-Base v1.0 + Pu...
[ -0.13392198085784912, -0.09290673583745956, 0.0021686838008463383, -0.037367839366197586, -0.03704371303319931, 0.06029707193374634, -0.06899192929267883, 0.12982583045959473, 0.04429659992456436, -0.015147091820836067, 0.00587890250608325, 0.0380965955555439, 0.01997937075793743, 0.041801...
dmis-lab/biosyn-sapbert-bc2gn
28ef41eace90e9aa6a9db372413c145883c72902
2022-02-25T13:32:53.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
dmis-lab
null
dmis-lab/biosyn-sapbert-bc2gn
358,818
null
transformers
hello
[ -0.06277179718017578, 0.054958850145339966, 0.05216483399271965, 0.08579003810882568, -0.08274887502193451, -0.07457298040390015, 0.06855464726686478, 0.01839636079967022, -0.08201130479574203, -0.037384770810604095, 0.012124866247177124, 0.0035182537976652384, -0.004134270828217268, -0.04...
facebook/detr-resnet-50
272941311143979e4ade5424ede52fb5e84c9969
2022-06-27T08:29:51.000Z
[ "pytorch", "detr", "object-detection", "dataset:coco", "arxiv:2005.12872", "transformers", "vision", "license:apache-2.0" ]
object-detection
false
facebook
null
facebook/detr-resnet-50
355,674
48
transformers
--- license: apache-2.0 tags: - object-detection - vision datasets: - coco widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg example_title: Savanna - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg example_title: Football Match - s...
[ -0.10373219102621078, -0.008586849085986614, 0.02562749944627285, -0.021864578127861023, 0.13229957222938538, -0.054243844002485275, -0.015863973647356033, 0.014823966659605503, 0.00230921758338809, -0.012018969282507896, 0.04497916251420975, -0.04509170725941658, -0.028047338128089905, 0....
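A minimal object-detection sketch for the DETR checkpoint above; the `DetrImageProcessor` name assumes a reasonably recent `transformers` release (older ones expose `DetrFeatureExtractor`), and the COCO image URL is illustrative:

```python
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# Keep detections above a confidence threshold and map label ids to names.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes,
                                                  threshold=0.9)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))
```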
google/vit-base-patch16-224
5dca96d358b3fcb9d53b3d3881eb1ae20b6752d1
2022-06-23T07:42:10.000Z
[ "pytorch", "tf", "jax", "vit", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-21k", "arxiv:2010.11929", "arxiv:2006.03677", "transformers", "vision", "license:apache-2.0" ]
image-classification
false
google
null
google/vit-base-patch16-224
352,185
52
transformers
--- license: apache-2.0 tags: - vision - image-classification datasets: - imagenet-1k - imagenet-21k widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teap...
[ -0.07735730707645416, 0.03693828359246254, 0.019276298582553864, -0.030712202191352844, 0.07025900483131409, -0.07447640597820282, -0.01946285367012024, 0.053060173988342285, -0.04137694090604782, -0.021975580602884293, 0.06290626525878906, -0.034133244305849075, 0.08397805690765381, 0.048...
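A minimal ImageNet classification sketch for the ViT checkpoint above; as with DETR, the `ViTImageProcessor` name assumes a recent `transformers` release (older ones use `ViTFeatureExtractor`):

```python
import requests
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # top predicted class
```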
sentence-transformers/paraphrase-mpnet-base-v2
18df4b22cd35517843308534d066190182ff39ef
2022-06-15T19:23:23.000Z
[ "pytorch", "tf", "mpnet", "feature-extraction", "arxiv:1908.10084", "sentence-transformers", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
false
sentence-transformers
null
sentence-transformers/paraphrase-mpnet-base-v2
348,258
6
sentence-transformers
--- pipeline_tag: sentence-similarity license: apache-2.0 tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sentence-transformers/paraphrase-mpnet-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional den...
[ -0.039815086871385574, -0.0677199587225914, -0.008281015790998936, 0.031505927443504333, 0.040791913866996765, 0.059537239372730255, -0.03474612906575203, 0.016238495707511902, 0.0063071660697460175, -0.06239967420697212, 0.05775539577007294, 0.004985908977687359, 0.055302850902080536, 0.0...
cross-encoder/nli-distilroberta-base
99f096e70ef1fb038b8f0aecabc5a0f491684084
2021-08-05T08:40:59.000Z
[ "pytorch", "jax", "roberta", "text-classification", "en", "dataset:multi_nli", "dataset:snli", "transformers", "distilroberta-base", "license:apache-2.0", "zero-shot-classification" ]
zero-shot-classification
false
cross-encoder
null
cross-encoder/nli-distilroberta-base
345,008
9
transformers
--- language: en pipeline_tag: zero-shot-classification tags: - distilroberta-base datasets: - multi_nli - snli metrics: - accuracy license: apache-2.0 --- # Cross-Encoder for Natural Language Inference This model was trained using [SentenceTransformers](https://sbert.net) [Cross-Encoder](https://www.sbert.net/example...
[ -0.05010258033871651, -0.09692981094121933, -0.046597957611083984, -0.04686028137803078, 0.060496799647808075, 0.09971753507852554, -0.05690623074769974, -0.0018377932719886303, -0.01317618414759636, -0.07581351697444916, 0.017678597941994667, -0.08035063743591309, -0.008702612482011318, -...
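A minimal NLI sketch for the cross-encoder above, assuming the `sentence-transformers` package; the contradiction/entailment/neutral label order is an assumption about this checkpoint, and the sentence pairs are illustrative:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder("cross-encoder/nli-distilroberta-base")
scores = model.predict([("A man is eating pizza", "A man eats something"),
                        ("A man is eating pizza", "Nobody is eating")])

# One score per class per pair; pick the argmax under the assumed label order.
labels = [["contradiction", "entailment", "neutral"][i] for i in scores.argmax(axis=1)]
print(labels)
```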
finiteautomata/bertweet-base-sentiment-analysis
cf6b0f60e84096e077c171fe3176093674370291
2022-06-23T13:01:55.000Z
[ "pytorch", "tf", "roberta", "text-classification", "en", "arxiv:2106.09462", "transformers", "sentiment-analysis" ]
text-classification
false
finiteautomata
null
finiteautomata/bertweet-base-sentiment-analysis
338,964
18
transformers
--- language: - en tags: - sentiment-analysis --- # Sentiment Analysis in English ## bertweet-sentiment-analysis Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/) Model trained with the SemEval 2017 corpus (around 40k tweets). Base model is [BERTweet...
[ -0.10483858734369278, -0.05710936710238457, -0.007168810814619064, -0.0029249864164739847, 0.04239180311560631, 0.027894387021660805, 0.06531602889299393, 0.04366088658571243, 0.07999078184366226, 0.007977987639605999, 0.07169503718614578, -0.007533452473580837, 0.02954231947660446, 0.0595...
distilbert-base-cased
8d708decd7afb7bec0af233e5338fe1fca3db705
2022-07-22T08:12:05.000Z
[ "pytorch", "tf", "distilbert", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1910.01108", "transformers", "license:apache-2.0" ]
null
false
null
null
distilbert-base-cased
334,535
7
transformers
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # Model Card for DistilBERT base model (cased) This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-cased). It was introduced in [this paper](https://arxiv.org/abs/1910.01108). The code for the distillat...
[ -0.11877017468214035, -0.11439269036054611, 0.10081158578395844, 0.002818940207362175, -0.03515144810080528, -0.014470581896603107, -0.02057008445262909, 0.08113410323858261, -0.022086936980485916, -0.07199153304100037, -0.010488024912774563, 0.05130061134696007, 0.02058270014822483, 0.018...
yjernite/retribert-base-uncased
aeab2b097862fa41e084db47e0e02229649bbe53
2021-03-10T02:54:37.000Z
[ "pytorch", "retribert", "feature-extraction", "transformers" ]
feature-extraction
false
yjernite
null
yjernite/retribert-base-uncased
332,598
null
transformers
Entry not found
[ 0.0461147278547287, -0.038838207721710205, -0.01049656979739666, -0.03682169318199158, 0.011261860840022564, 0.013094935566186905, 0.0019101888174191117, -0.013979103416204453, 0.027092741802334785, -0.015212527476251125, 0.017284274101257324, -0.08189476281404495, 0.03817418962717056, -0....
emilyalsentzer/Bio_Discharge_Summary_BERT
affde836a50e4d333f15dae9270f5a856d59540b
2022-02-27T13:59:50.000Z
[ "pytorch", "jax", "bert", "en", "arxiv:1904.03323", "arxiv:1901.08746", "transformers", "fill-mask", "license:mit" ]
fill-mask
false
emilyalsentzer
null
emilyalsentzer/Bio_Discharge_Summary_BERT
328,763
8
transformers
--- language: "en" tags: - fill-mask license: mit --- # ClinicalBERT - Bio + Discharge Summary BERT Model The [Publicly Available Clinical BERT Embeddings](https://arxiv.org/abs/1904.03323) paper contains four unique clinicalBERT models: initialized with BERT-Base (`cased_L-12_H-768_A-12`) or BioBERT (`BioBERT-Base ...
[ -0.12280306965112686, -0.08057813346385956, 0.013495441526174545, -0.030393457040190697, -0.015569285489618778, 0.07275798916816711, -0.05874161794781685, 0.1335667073726654, 0.05952850356698036, -0.012769686058163643, 0.010991579852998257, 0.014589333906769753, 0.02326119691133499, 0.0653...
dslim/bert-large-NER
95c62bc0d4109bd97d0578e5ff482e6b84c2b8b9
2022-06-27T20:58:09.000Z
[ "pytorch", "tf", "jax", "bert", "token-classification", "en", "dataset:conll2003", "arxiv:1810.04805", "transformers", "license:mit", "autotrain_compatible" ]
token-classification
false
dslim
null
dslim/bert-large-NER
327,366
10
transformers
--- language: en datasets: - conll2003 license: mit --- # bert-large-NER ## Model description **bert-large-NER** is a fine-tuned BERT model that is ready to use for **Named Entity Recognition** and achieves **state-of-the-art performance** for the NER task. It has been trained to recognize four types of entities: loca...
[ -0.09337218105792999, -0.06468984484672546, 0.06588803976774216, -0.04721028357744217, -0.019723959267139435, -0.009055743925273418, 0.00244380091316998, 0.021299362182617188, 0.04770536348223686, -0.018067138269543648, -0.020009981468319893, -0.04756350815296173, -0.018890533596277237, 0....
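A minimal NER sketch for the fine-tuned model above; `aggregation_strategy` assumes a reasonably recent `transformers` release, and the input sentence is illustrative:

```python
from transformers import pipeline

# Grouped-entity NER: word pieces are merged into whole entity spans.
ner = pipeline("ner", model="dslim/bert-large-NER", aggregation_strategy="simple")
print(ner("My name is Wolfgang and I live in Berlin"))
```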