| Column | Type | Details |
| --- | --- | --- |
| id | string | length 2–115 |
| author | string | length 2–42 |
| last_modified | timestamp[us, tz=UTC] | |
| downloads | int64 | 0–8.87M |
| likes | int64 | 0–3.84k |
| paperswithcode_id | string | length 2–45 |
| tags | list | |
| lastModified | timestamp[us, tz=UTC] | |
| createdAt | string | length 24 (fixed) |
| key | string | 1 distinct value |
| created | timestamp[us] | |
| card | string | length 1–1.01M |
| embedding | list | |
| library_name | string | 21 distinct values |
| pipeline_tag | string | 27 distinct values |
| mask_token | null | |
| card_data | null | |
| widget_data | null | |
| model_index | null | |
| config | null | |
| transformers_info | null | |
| spaces | null | |
| safetensors | null | |
| transformersInfo | null | |
| modelId | string | length 5–111 |
| embeddings | list | |
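A minimal sketch of how the records below can be inspected programmatically, assuming the rows have been exported locally as a Parquet file (the file name is hypothetical, not part of the dataset):

```python
import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("datasets_metadata.parquet")

# Most rows carry the placeholder card "Entry not found"; keep only real cards.
with_cards = df[df["card"] != "Entry not found"]

print(len(df), "records,", len(with_cards), "with a dataset card")
print(with_cards[["id", "author", "downloads", "likes"]].head())
```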
teven/mpww_all_passages
teven
2022-01-10T01:06:10Z
12
0
null
[ "region:us" ]
2022-01-10T01:06:10Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
thomwolf/codeparrot-train
thomwolf
2021-07-27T23:09:19Z
12
0
null
[ "region:us" ]
2021-07-27T23:09:19Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
thomwolf/github-dataset
thomwolf
2021-07-09T15:54:18Z
12
0
null
[ "region:us" ]
2021-07-09T15:54:18Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
toriving/kosimcse
toriving
2021-08-02T05:11:44Z
12
0
null
[ "region:us" ]
2021-08-02T05:11:44Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
toriving/talktalk-sentiment-210713-multi-singleturn-custom-multiturn
toriving
2021-07-21T16:33:03Z
12
0
null
[ "region:us" ]
2021-07-21T16:33:03Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
versae/norwegian-t5-dataset-debug2
versae
2021-09-08T13:51:39Z
12
0
null
[ "region:us" ]
2021-09-08T13:51:39Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
vishnun/huggingpics-data
vishnun
2021-11-24T08:01:47Z
12
0
null
[ "region:us" ]
2021-11-24T08:01:47Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
vkhangpham/github-issues
vkhangpham
2022-01-30T07:56:36Z
12
0
null
[ "region:us" ]
2022-01-30T07:56:36Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
vumichien/ja_opus100_processed
vumichien
2022-01-29T11:14:34Z
12
0
null
[ "region:us" ]
2022-01-29T11:14:34Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
w-nicole/childes_data
w-nicole
2021-06-17T19:23:14Z
12
0
null
[ "region:us" ]
2021-06-17T19:23:14Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
w-nicole/childes_data_no_tags
w-nicole
2021-06-19T18:39:07Z
12
0
null
[ "region:us" ]
2021-06-19T18:39:07Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
w-nicole/childes_data_no_tags_
w-nicole
2021-06-24T17:47:17Z
12
0
null
[ "region:us" ]
2021-06-24T17:47:17Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
w-nicole/childes_data_with_tags
w-nicole
2021-06-19T19:10:10Z
12
0
null
[ "region:us" ]
2021-06-19T19:10:10Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
w-nicole/childes_data_with_tags_
w-nicole
2021-06-24T17:43:15Z
12
0
null
[ "region:us" ]
2021-06-24T17:43:15Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
wanagenst/maslow-six-choices
wanagenst
2022-01-03T13:06:22Z
12
0
null
[ "region:us" ]
2022-01-03T13:06:22Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
wanagenst/reiss-stories
wanagenst
2021-12-30T02:06:17Z
12
0
null
[ "region:us" ]
2021-12-30T02:06:17Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
wanagenst/reiss-twenty-choices
wanagenst
2022-01-02T16:56:45Z
12
0
null
[ "region:us" ]
2022-01-02T16:56:45Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
warwickai/financial_phrasebank_mirror
warwickai
2022-01-17T00:19:04Z
12
0
null
[ "region:us" ]
2022-01-17T00:19:04Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
webis/ms-marco-anchor-text
webis
2022-01-30T19:19:02Z
12
2
null
[ "region:us" ]
2022-01-30T19:19:02Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
# Webis MS MARCO Anchor Text 2022

The [Webis MS MARCO Anchor Text 2022 dataset](https://webis.de/data/webis-ms-marco-anchor-text-22.html) enriches Versions 1 and 2 of the document collection of [MS MARCO](https://microsoft.github.io/msmarco/) with anchor text extracted from six [Common Crawl](https://commoncrawl.org/) snapshots covering the years 2016 to 2021 (1.7–3.4 billion documents each). For documents with more than 1,000 anchor texts we sampled 1,000 at random; for documents with fewer than 1,000 anchor texts we kept all of them. With this sampling, all anchor text is included for 94% of the documents in Version 1 and 97% in Version 2. Overall, the MS MARCO Anchor Text 2022 dataset enriches 1,703,834 documents in Version 1 and 4,821,244 documents in Version 2 with anchor text.

Cleaned versions of the MS MARCO Anchor Text 2022 dataset are available in [ir_datasets](https://github.com/allenai/ir_datasets/issues/154), [Zenodo](https://zenodo.org/record/5883456), and [Hugging Face](https://huggingface.co/datasets/webis/ms-marco-anchor-text). The raw dataset, with additional information and all metadata for the extracted anchor texts (roughly 100GB), is available on [Hugging Face](https://huggingface.co/datasets/webis/ms-marco-anchor-text/tree/main/ms-marco-v1/anchor-text) and [files.webis.de](https://files.webis.de/data-in-progress/ecir22-anchor-text/anchor-text-samples/). The construction of the Webis MS MARCO Anchor Text 2022 dataset is described in detail in the [associated paper](https://webis.de/publications.html#froebe_2022a).

If you use this dataset, please cite:

```
@InProceedings{froebe:2022a,
  address   = {Berlin Heidelberg New York},
  author    = {Maik Fr{\"o}be and Sebastian G{\"u}nther and Maximilian Probst and Martin Potthast and Matthias Hagen},
  booktitle = {Advances in Information Retrieval. 44th European Conference on IR Research (ECIR 2022)},
  editor    = {Matthias Hagen and Suzan Verberne and Craig Macdonald and Christin Seifert and Krisztian Balog and Kjetil N{\o}rv\r{a}g and Vinay Setty},
  month     = apr,
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  site      = {Stavanger, Norway},
  title     = {{The Power of Anchor Text in the Neural Retrieval Era}},
  year      = 2022
}
```
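The per-document sampling rule described in the summary is simple to reproduce. A minimal sketch, assuming anchor texts have already been grouped per document id (the variable names are illustrative, not part of the dataset):

```python
import random

MAX_PER_DOC = 1000

def sample_anchor_texts(anchors_by_doc: dict[str, list[str]], seed: int = 0) -> dict[str, list[str]]:
    """Keep all anchor texts for documents with at most 1,000 of them,
    and a uniform random sample of 1,000 otherwise."""
    rng = random.Random(seed)
    sampled = {}
    for doc_id, anchors in anchors_by_doc.items():
        if len(anchors) > MAX_PER_DOC:
            sampled[doc_id] = rng.sample(anchors, MAX_PER_DOC)
        else:
            sampled[doc_id] = list(anchors)
    return sampled
```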
[ -0.3961944878101349, -0.33572670817375183, 0.3917498290538788, 0.371341735124588, -0.23028478026390076, -0.1488989293575287, -0.2303016185760498, -0.767518162727356, 0.39504069089889526, 0.1419956237077713, -0.5425488352775574, -0.6410403847694397, -0.503766655921936, 0.4305706024169922, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
wesamhaddad14/testdata
wesamhaddad14
2022-03-29T01:15:55Z
12
0
null
[ "region:us" ]
2022-03-29T01:15:55Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
wpicard/nostradamus-propheties
wpicard
2022-10-23T04:54:07Z
12
0
null
[ "task_ids:language-modeling", "annotations_creators:no-annotation", "multilinguality:monolingual", "size_categories:unknown", "language:en", "license:unknown", "region:us" ]
2022-10-23T04:54:07Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
---
annotations_creators:
- no-annotation
language_creators: []
language:
- en
language_bcp47:
- en-US
license:
- unknown
multilinguality:
- monolingual
pretty_name: nostradamus-propheties
size_categories:
- unknown
source_datasets: []
task_categories:
- sequence-modeling
task_ids:
- language-modeling
---

# Dataset Card for "nostradamus-propheties"

## Dataset Description

### Dataset Summary

The Nostradamus propheties dataset is a set of structured files containing the "Propheties" by Nostradamus, translated into modern English. The original text consists of 10 "Centuries", each century containing 100 numbered quatrains. In the dataset, every century is a separate file named `century**.json`. For instance, all the quatrains of Century I are in the file `century01.json`. The century and the quatrain number are kept for every quatrain, and every quatrain is split into four separate lines. For example, the second quatrain of Century I is stored in `century01.json` as follows:

```
{
  "century": 1,
  "index": 2,
  "line1": "The wand in the hand is placed in the middle of the tripod's legs.",
  "line2": "With water he sprinkles both the hem of his garment and his foot.",
  "line3": "A voice, fear: he trembles in his robes.",
  "line4": "Divine splendor; the God sits nearby."
}
```
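Given the per-century layout above, the quatrains can be reassembled with a few lines of Python. A minimal sketch, assuming each `century**.json` file holds a JSON array of quatrain records shaped like the example (the array wrapper is an assumption; the card only shows a single record):

```python
import json

# Assumption: each century file is a JSON array of records like
# {"century": ..., "index": ..., "line1": ..., ..., "line4": ...}.
with open("century01.json", encoding="utf-8") as f:
    quatrains = json.load(f)

for q in quatrains[:3]:
    text = "\n".join(q[f"line{i}"] for i in range(1, 5))
    print(f"Century {q['century']}, quatrain {q['index']}:\n{text}\n")
```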
[ -0.43148383498191833, -0.4907725751399994, 0.09977227449417114, 0.10211100429296494, -0.6625444293022156, -0.04259655252099037, 0.19540514051914215, -0.30003494024276733, 0.49963513016700745, 0.9592689275741577, -0.6143029928207397, -0.7233128547668457, -0.5846362113952637, 0.4918135106563...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/contract_nli-debug
yuvalkirstain
2022-01-05T15:35:43Z
12
0
null
[ "region:us" ]
2022-01-05T15:35:43Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/contract_nli_t5
yuvalkirstain
2022-01-09T06:16:30Z
12
0
null
[ "region:us" ]
2022-01-09T06:16:30Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/contract_nli_t5_lm
yuvalkirstain
2022-01-09T15:27:51Z
12
0
null
[ "region:us" ]
2022-01-09T15:27:51Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/qasper_t5
yuvalkirstain
2022-01-09T06:17:13Z
12
0
null
[ "region:us" ]
2022-01-09T06:17:13Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/qasper_t5_lm
yuvalkirstain
2022-01-09T15:26:22Z
12
0
null
[ "region:us" ]
2022-01-09T15:26:22Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/qmsum_t5
yuvalkirstain
2022-01-09T06:16:47Z
12
0
null
[ "region:us" ]
2022-01-09T06:16:47Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/qmsum_t5_lm
yuvalkirstain
2022-01-09T15:29:46Z
12
0
null
[ "region:us" ]
2022-01-09T15:29:46Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/quality
yuvalkirstain
2021-12-30T10:05:25Z
12
1
null
[ "region:us" ]
2021-12-30T10:05:25Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/quality_squad
yuvalkirstain
2021-12-30T10:28:26Z
12
0
null
[ "region:us" ]
2021-12-30T10:28:26Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/quality_squad_debug
yuvalkirstain
2021-12-30T11:49:16Z
12
0
null
[ "region:us" ]
2021-12-30T11:49:16Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/quality_t5
yuvalkirstain
2022-01-09T06:16:09Z
12
0
null
[ "region:us" ]
2022-01-09T06:16:09Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/quality_t5_lm
yuvalkirstain
2022-01-09T15:33:28Z
12
0
null
[ "region:us" ]
2022-01-09T15:33:28Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/scrolls_t5
yuvalkirstain
2022-01-09T06:09:07Z
12
0
null
[ "region:us" ]
2022-01-09T06:09:07Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/squad_full_doc
yuvalkirstain
2021-12-29T13:13:12Z
12
0
null
[ "region:us" ]
2021-12-29T13:13:12Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/squad_seq2seq
yuvalkirstain
2022-01-06T16:18:19Z
12
0
null
[ "region:us" ]
2022-01-06T16:18:19Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuvalkirstain/squad_t5
yuvalkirstain
2022-01-09T09:31:52Z
12
0
null
[ "region:us" ]
2022-01-09T09:31:52Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
zj88zj/SCIERC
zj88zj
2021-12-13T22:39:35Z
12
0
null
[ "region:us" ]
2021-12-13T22:39:35Z
2022-03-02T23:29:22.000Z
2022-03-02T23:29:22
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
ArnavL/finetune_preprocessed_yelp
ArnavL
2022-03-03T12:37:28Z
12
0
null
[ "region:us" ]
2022-03-03T12:37:28Z
2022-03-03T12:36:56.000Z
2022-03-03T12:36:56
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622264862060547, 0.43461528420448303, -0.52829909324646, 0.7012971639633179, 0.7915720343589783, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104477167129517, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
Alvenir/alvenir_asr_da_eval
Alvenir
2022-06-16T09:13:33Z
12
5
null
[ "license:cc-by-4.0", "region:us" ]
2022-06-16T09:13:33Z
2022-03-04T13:14:47.000Z
2022-03-04T13:14:47
---
license: cc-by-4.0
---

# Dataset Card alvenir_asr_da_eval

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Prompts/sentence selection](#promptssentence-selection)
  - [Recording](#recording)
  - [Evaluation](#evaluation)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Licensing Information](#licensing-information)

## Dataset Description

- **Homepage:** https://alvenir.ai
- **Repository:** https://github.com/danspeech/alvenir-asr-da-eval/

### Dataset Summary

This dataset was created by Alvenir in order to evaluate ASR models in Danish. It can also be used for training, but the amount of data is very limited. The dataset consists of .wav files with corresponding reference text: just over 5 hours of audio spread across 50 speakers aged 20–60. The data was collected by a third-party vendor through their software and people, and all recordings have been validated.

## Dataset Structure

### Data Instances

A data point consists of the path to the audio file (`path`) and its sentence. Additional fields such as age and gender will eventually be added.

```
{'audio': {'path': 'some_path.wav', 'array': array([-0.044223, -0.00031411, -0.00435671, ..., 0.00612312, 0.00014581, 0.00091009], dtype=float32), 'sampling_rate': 16000}}
```

### Data Fields

audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column (`dataset[0]["audio"]`), the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling a large number of audio files can take a significant amount of time, so it is important to query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`.

sentence: The sentence the user was prompted to speak.

### Data Splits

Since the idea behind the dataset is for it to be used as a test/eval ASR dataset for Danish, there is only a test split.

## Dataset Creation

### Prompts/sentence selection

The sentences used for prompts were gathered from the Danish part of Open Subtitles (OSS) (need reference) and Wikipedia (WIKI). The OSS prompts were sampled randomly across the dataset, making sure that all prompts are unique. The WIKI prompts were selected by first training a topic model with 30 topics on Wikipedia and then randomly sampling an equal number of unique sentences from each topic. All sentences were manually inspected.

### Recording

50 unique speakers were each sent 20 WIKI sentences and 60 sentences from OSS. The recordings took place through third-party recording software.

### Evaluation

All recordings were evaluated by a third party to confirm alignment between audio and text.

### Personal and Sensitive Information

The dataset consists of people who have given their voice to the dataset for ASR purposes. You agree not to attempt to determine the identity of any of the speakers in the dataset.

### Licensing Information

[cc-by-4.0](https://creativecommons.org/licenses/by/4.0/)
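A minimal loading sketch following the access pattern recommended above (standard 🤗 `datasets` API; nothing here is specific to this card beyond the repository id and the test-only split):

```python
from datasets import load_dataset

# The card states there is only a test split.
ds = load_dataset("Alvenir/alvenir_asr_da_eval", split="test")

# Index the row first, then access "audio": only this one file is decoded.
sample = ds[0]
audio = sample["audio"]  # {'path': ..., 'array': ..., 'sampling_rate': ...}
print(sample["sentence"])
print(audio["sampling_rate"], len(audio["array"]))
```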
[ -0.658559262752533, -0.6488509178161621, -0.001022262149490416, 0.1218947097659111, -0.2829468548297882, -0.22388042509555817, -0.4582383930683136, -0.14146757125854492, 0.16127407550811768, 0.5100449323654175, -0.7254818081855774, -0.7107208967208862, -0.44281890988349915, 0.2843247652053...
null
null
null
null
null
null
null
null
null
null
null
null
null
anjandash/java-8m-methods-v1
anjandash
2022-07-01T20:32:32Z
12
1
null
[ "multilinguality:monolingual", "license:mit", "region:us" ]
2022-07-01T20:32:32Z
2022-03-04T17:16:46.000Z
2022-03-04T17:16:46
---
language:
- java
license:
- mit
multilinguality:
- monolingual
pretty_name: java-8m-methods-v1
---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
rocca/emojis
rocca
2022-04-29T09:37:55Z
12
0
null
[ "region:us" ]
2022-04-29T09:37:55Z
2022-03-06T02:31:30.000Z
2022-03-06T02:31:30
A collection of 38,176 emoji images from Facebook, Google, Apple, WhatsApp, Samsung, [JoyPixels](https://www.joypixels.com/), Twitter, [emojidex](https://www.emojidex.com/), LG, [OpenMoji](https://openmoji.org/), and Microsoft. It includes all the emojis for these apps/platforms as of early 2022.

* Counts: Facebook=3664, Google=3664, Apple=3961, WhatsApp=3519, Samsung=3752, JoyPixels=3538, Twitter=3544, emojidex=2040, LG=3051, OpenMoji=3512, Microsoft=3931.
* Sizes: Facebook=144x144, Google=144x144, Apple=144x144, WhatsApp=144x144, Samsung=108x108, JoyPixels=144x144, Twitter=144x144, emojidex=144x144, LG=136x128, OpenMoji=144x144, Microsoft=144x144.
* The tar files directly contain the image files (they're not inside a parent folder).
* The emoji code points are at the end of the filename, but there are some adjustments needed to parse them into the Unicode character consistently across all sets of emojis in this dataset.

Here's some JavaScript code to convert the file name of an emoji image into the actual Unicode emoji character:

```js
let filename = ...;
let fixedFilename = filename.replace(/(no|light|medium|medium-light|medium-dark|dark)-skin-tone/, "").replace(/__/, "_").replace(/--/, "-");
let emoji = String.fromCodePoint(...fixedFilename.split("_")[1].split(".")[0].split("-").map(hex => parseInt(hex, 16)));
```

## Facebook examples:
![Facebook emoji grid](https://i.imgur.com/z0ZCHfO.jpg)

## Google examples:
![Google emoji grid](https://i.imgur.com/yhPVAzN.jpg)

## Apple examples:
![Apple emoji grid](https://i.imgur.com/Y0fUAIA.jpg)

## WhatsApp examples:
![WhatsApp emoji grid](https://i.imgur.com/6kqHLXW.jpg)

## Samsung examples:
![Samsung emoji grid](https://i.imgur.com/rERdop1.jpg)

## JoyPixels examples:
![JoyPixels emoji grid](https://i.imgur.com/nZSYsiN.jpg)

## Twitter examples:
![Twitter emoji grid](https://i.imgur.com/zRxJHfj.jpg)

## emojidex examples:
![emojidex emoji grid](https://i.imgur.com/BQYBu7a.jpg)

## LG examples:
![LG emoji grid](https://i.imgur.com/xv1lQRl.jpg)

## OpenMoji examples:
![OpenMoji emoji grid](https://i.imgur.com/Uk8aRXx.jpg)

## Microsoft examples:
![Microsoft emoji grid](https://i.imgur.com/Z01Tnn9.jpg)
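The same filename-to-codepoint conversion, ported to Python for readers working outside JavaScript (a direct port of the snippet above; the example filename follows the `vendor_codepoints.png` pattern implied by the JS code and is hypothetical):

```python
import re

def filename_to_emoji(filename: str) -> str:
    # Port of the JavaScript snippet above: strip the first skin-tone marker,
    # normalize separators, then parse the trailing hyphen-separated hex code points.
    fixed = re.sub(r"(no|light|medium|medium-light|medium-dark|dark)-skin-tone", "", filename, count=1)
    fixed = fixed.replace("__", "_", 1).replace("--", "-", 1)
    codepoints = fixed.split("_")[1].split(".")[0].split("-")
    return "".join(chr(int(cp, 16)) for cp in codepoints)

# Hypothetical filename for illustration:
print(filename_to_emoji("twitter_1f600.png"))  # 😀
```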
[ -0.5169891715049744, -0.5014777779579163, 0.3688547909259796, 0.31490492820739746, -0.15585190057754517, 0.11401773244142532, -0.40492063760757446, -0.3095402121543884, 0.6346776485443115, 0.45400869846343994, -0.6509990692138672, -0.8561275601387024, -0.4085342288017273, 0.322839945554733...
null
null
null
null
null
null
null
null
null
null
null
null
null
pensieves/mimicause
pensieves
2022-03-29T14:54:48Z
12
3
null
[ "license:apache-2.0", "arxiv:2110.07090", "region:us" ]
2022-03-29T14:54:48Z
2022-03-07T20:33:38.000Z
2022-03-07T20:33:38
---
license: apache-2.0
pretty_name: MIMICause
---

# Dataset Card for "MIMICause"

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Additional Information](#additional-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** [https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/](https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/)
- **Paper:** [MIMICause: Representation and automatic extraction of causal relation types from clinical notes](https://arxiv.org/abs/2110.07090)
- **Size of downloaded dataset files:** 333.4 KB
- **Size of the generated dataset:** 491.2 KB
- **Total amount of disk used:** 668.2 KB

### Dataset Summary

MIMICause is a dataset for representation and automatic extraction of causal relation types from clinical notes. It requires a manual download of the mimicause.zip file from the **Community Annotations Downloads** section of the n2c2 dataset on [Harvard's DBMI Data Portal](https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/) after signing their agreement forms, which is a quick and easy procedure.

The dataset has 2714 samples covering both explicit and implicit causality, with entities in the same sentence or in different sentences. The nine semantic causal relations (with directionality) between entities E1 and E2 in a text snippet are: (1) Cause(E1,E2), (2) Cause(E2,E1), (3) Enable(E1,E2), (4) Enable(E2,E1), (5) Prevent(E1,E2), (6) Prevent(E2,E1), (7) Hinder(E1,E2), (8) Hinder(E2,E1), and (9) Other.

### Supported Tasks

Causal relation extraction between entities expressed implicitly or explicitly, in single or across multiple sentences.

## Dataset Structure

### Data Instances

An example of a data sample looks as follows:

```
{
  "E1": "Florinef",
  "E2": "fluid retention",
  "Text": "Treated with <e1>Florinef</e1> in the past, was d/c'd due to <e2>fluid retention</e2>.",
  "Label": 0
}
```

### Data Fields

The data fields are the same among all the splits.

- `E1`: a `string` value.
- `E2`: a `string` value.
- `Text`: a `large_string` value.
- `Label`: a `ClassLabel` categorical value.

### Data Splits

The original dataset downloaded from [Harvard's DBMI Data Portal](https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/) has all the data in a single split. The dataset loading provided here through huggingface datasets splits the data into the following train, validation and test splits for convenience.

| name      | train | validation | test |
|-----------|------:|-----------:|-----:|
| mimicause |  1953 |        489 |  272 |

## Additional Information

### Citation Information

```
@inproceedings{khetan-etal-2022-mimicause,
  title = {MIMICause: Representation and automatic extraction of causal relation types from clinical notes},
  author = {Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2022},
  month = {may},
  year = {2022},
  publisher = {Association for Computational Linguistics},
  address = {Dublin, The Republic of Ireland},
  url = {},
  doi = {},
  pages = {},
}
```
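Since the archive must be fetched manually, loading follows the usual manual-download pattern of 🤗 `datasets`. A minimal sketch, assuming `mimicause.zip` sits in a local directory passed via `data_dir` (this kwarg handling is an assumption based on the card, not verified against the loading script):

```python
from datasets import load_dataset

# Assumption: the loading script accepts the directory containing the
# manually downloaded mimicause.zip via data_dir.
ds = load_dataset("pensieves/mimicause", data_dir="path/to/downloads")

sample = ds["train"][0]
label_names = ds["train"].features["Label"].names  # the nine relation classes
print(sample["Text"], "->", label_names[sample["Label"]])
```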
[ -0.07446171343326569, -0.7444812655448914, 0.3269020915031433, 0.39892685413360596, -0.10814278572797775, -0.33342161774635315, 0.02025342918932438, -0.6348238587379456, 0.5608312487602234, 0.42272689938545227, -0.6775627136230469, -0.46164098381996155, -0.6282805800437927, 0.3271409869194...
null
null
null
null
null
null
null
null
null
null
null
null
null
McGill-NLP/feedbackQA
McGill-NLP
2023-06-14T17:27:23Z
12
5
null
[ "license:apache-2.0", "arxiv:2204.03025", "region:us" ]
2023-06-14T17:27:23Z
2022-03-10T23:50:07.000Z
2022-03-10T23:50:07
---
license: apache-2.0
---

# Dataset Card for FeedbackQA

[📄 Read](https://arxiv.org/pdf/2204.03025.pdf)<br>
[💾 Code](https://github.com/McGill-NLP/feedbackqa)<br>
[🔗 Webpage](https://mcgill-nlp.github.io/feedbackqa/)<br>
[💻 Demo](http://206.12.100.48:8080/)<br>
[🤗 Huggingface Dataset](https://huggingface.co/datasets/McGill-NLP/feedbackQA)<br>
[💬 Discussions](https://github.com/McGill-NLP/feedbackqa/discussions)

## Dataset Description

- **Homepage: https://mcgill-nlp.github.io/feedbackqa-data/**
- **Repository: https://github.com/McGill-NLP/feedbackqa-data/**
- **Paper:**
- **Leaderboard:**
- **Tasks: Question Answering**

### Dataset Summary

FeedbackQA is a retrieval-based QA dataset that contains interactive feedback from users. It has two parts: the first part is a conventional retrieval-based QA dataset, whilst this repo contains the second part, the feedback (ratings and natural language explanations) for QA pairs.

### Languages

English

## Dataset Creation

For each question-answer pair, we collected multiple pieces of feedback, each of which consists of a rating, selected from excellent, good, could be improved, and bad, and a natural language explanation elaborating on the strengths and/or weaknesses of the answer.

#### Initial Data Collection and Normalization

We scraped Covid-19-related content from official websites.

### Annotations

#### Who are the annotators?

Crowd-workers

### Licensing Information

Apache 2.0

### Contributions

[McGill-NLP](https://github.com/McGill-NLP)
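The four-level rating scale lends itself to a simple numeric mapping when the feedback is used to score or rerank answers. A minimal sketch (the score values and the `rating` field name are illustrative assumptions, not taken from the card):

```python
# Illustrative mapping of the four rating levels to scores; the field name
# "rating" is an assumption for the sake of the example.
RATING_SCORES = {"bad": 0, "could be improved": 1, "good": 2, "excellent": 3}

def mean_rating(feedback: list[dict]) -> float:
    """Average numeric score of the ratings attached to one QA pair."""
    scores = [RATING_SCORES[fb["rating"]] for fb in feedback]
    return sum(scores) / len(scores)

print(mean_rating([{"rating": "good"}, {"rating": "excellent"}]))  # 2.5
```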
[ -0.5588199496269226, -0.5368438959121704, 0.060707949101924896, 0.397737979888916, -0.19712871313095093, -0.03897589445114136, -0.17833639681339264, -0.3363552689552307, 0.5983455181121826, 0.4017391502857208, -0.762467086315155, -0.7748108506202698, -0.30697160959243774, 0.176997184753417...
null
null
null
null
null
null
null
null
null
null
null
null
null
Khedesh/ArmanNER
Khedesh
2022-03-11T10:42:30Z
12
0
null
[ "region:us" ]
2022-03-11T10:42:30Z
2022-03-11T08:13:29.000Z
2022-03-11T08:13:29
# PersianNER

Named-Entity Recognition in Persian Language

## ArmanPersoNERCorpus

This is the first manually-annotated Persian named-entity (NE) dataset (ISLRN 399-379-640-828-6). We are releasing it only for academic research use.

The dataset includes 250,015 tokens and 7,682 Persian sentences in total. It is available in 3 folds to be used in turn as training and test sets. Each file contains one token per line, along with its manually annotated named-entity tag; sentences are separated by a newline. The NER tags are in IOB format.

According to the instructions provided to the annotators, NEs are categorized into six classes: person, organization (such as banks, ministries, embassies, teams, nationalities, networks and publishers), location (such as cities, villages, rivers, seas, gulfs, deserts and mountains), facility (such as schools, universities, research centers, airports, railways, bridges, roads, harbors, stations, hospitals, parks, zoos and cinemas), product (such as books, newspapers, TV shows, movies, airplanes, ships, cars, theories, laws, agreements and religions), and event (such as wars, earthquakes, national holidays, festivals and conferences); all remaining tokens are tagged as other.
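The token-per-line, blank-line-separated layout described above is the usual CoNLL-style format and can be read with a short parser. A minimal sketch, assuming each line is `token<whitespace>tag` (the exact column separator is an assumption):

```python
def read_fold(path: str) -> list[list[tuple[str, str]]]:
    """Parse a token-per-line NER file into sentences of (token, IOB-tag) pairs.
    Sentences are separated by blank lines."""
    sentences, current = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:  # blank line ends a sentence
                if current:
                    sentences.append(current)
                    current = []
                continue
            token, tag = line.split()[:2]  # assumes whitespace-separated columns
            current.append((token, tag))
    if current:
        sentences.append(current)
    return sentences
```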
[ -0.7297655940055847, -0.43024903535842896, 0.4376138150691986, 0.07096043229103088, -0.3918758034706116, 0.37622109055519104, -0.5484298467636108, -0.3746100664138794, 0.4583035111427307, 0.7496405243873596, -0.43801137804985046, -0.568155825138092, -0.5545560121536255, 0.5424662232398987,...
null
null
null
null
null
null
null
null
null
null
null
null
null
multiIR/toy_data
multiIR
2022-03-14T10:33:27Z
12
0
null
[ "region:us" ]
2022-03-14T10:33:27Z
2022-03-13T04:08:34.000Z
2022-03-13T04:08:34
# Toy dataset

This is a small portion of the full dataset, used for testing and formatting purposes.
[ -0.4529443383216858, -0.6563281416893005, -0.23367927968502045, 0.20035678148269653, -0.6299191117286682, -0.02531992644071579, -0.06826457381248474, 0.3909342885017395, 0.3221379518508911, 0.22493714094161987, -0.9596701264381409, -0.12030435353517532, 0.04100893810391426, -0.045200910419...
null
null
null
null
null
null
null
null
null
null
null
null
null
willcai/wav2vec2_common_voice_accents
willcai
2022-03-13T05:59:37Z
12
0
null
[ "region:us" ]
2022-03-13T05:59:37Z
2022-03-13T05:42:52.000Z
2022-03-13T05:42:52
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
GEM-submissions/lewtun__this-is-a-test__1647247409
GEM-submissions
2022-03-14T08:43:34Z
12
0
null
[ "benchmark:gem", "evaluation", "benchmark", "region:us" ]
2022-03-14T08:43:34Z
2022-03-14T08:43:33.000Z
2022-03-14T08:43:33
---
benchmark: gem
type: prediction
submission_name: This is a test
tags:
- evaluation
- benchmark
---

# GEM Submission

Submission name: This is a test
[ -0.01583682745695114, -0.9654787182807922, 0.5841941833496094, 0.1292470544576645, -0.28037282824516296, 0.4549468457698822, 0.18859517574310303, 0.3502408564090729, 0.47759607434272766, 0.4162292778491974, -1.146683692932129, -0.13004909455776215, -0.4930274486541748, 0.040180496871471405...
null
null
null
null
null
null
null
null
null
null
null
null
null
shpotes/ImVisible
shpotes
2022-03-16T21:42:10Z
12
0
null
[ "region:us" ]
2022-03-16T21:42:10Z
2022-03-16T16:51:54.000Z
2022-03-16T16:51:54
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
elena-soare/crawled-ecommerce
elena-soare
2022-04-04T10:35:10Z
12
0
null
[ "region:us" ]
2022-04-04T10:35:10Z
2022-03-18T11:19:43.000Z
2022-03-18T11:19:43
This dataset contains e-commerce data crawled from Common Crawl.
[ -0.16009768843650818, -0.8780660033226013, 0.38352033495903015, 0.1022142544388771, -0.21465003490447998, 0.075398288667202, 0.4589155614376068, -0.414766401052475, 1.02777898311615, 0.7809202671051025, -1.166611671447754, -1.1972702741622925, 0.01851729117333889, 0.23629778623580933, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
tomekkorbak/pile-curse-chunk-2
tomekkorbak
2022-03-18T21:40:23Z
12
0
null
[ "region:us" ]
2022-03-18T21:40:23Z
2022-03-18T21:39:11.000Z
2022-03-18T21:39:11
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
fangyuan/lfqa_discourse
fangyuan
2023-06-08T04:55:00Z
12
1
null
[ "annotations_creators:crowdsourced", "annotations_creators:expert-generated", "language_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:unknown", "source_datasets:extended|natural_questions", "source_datasets:extended|eli5", "license:cc-by-sa-4...
2023-06-08T04:55:00Z
2022-03-21T16:37:57.000Z
2022-03-21T16:37:57
---
annotations_creators:
- crowdsourced
- expert-generated
language_creators:
- machine-generated
- found
language:
- en-US
license:
- cc-by-sa-4.0
multilinguality:
- monolingual
pretty_name: lfqa_discourse
size_categories:
- unknown
source_datasets:
- extended|natural_questions
- extended|eli5
task_categories: []
task_ids: []
---

# Dataset Card for LFQA Discourse

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Repository:** [Repo](https://github.com/utcsnlp/lfqa_discourse)
- **Paper:** [How Do We Answer Complex Questions: Discourse Structure of Long-form Answers](https://arxiv.org/abs/2203.11048)
- **Point of Contact:** fangyuan[at]utexas.edu

### Dataset Summary

This dataset contains discourse annotation of long-form answers. There are two types of annotations:
* **Validity:** whether a <question, answer> pair is valid, based on a defined set of invalid reasons.
* **Role:** sentence-level annotation of the functional roles of long-form answers.

### Languages

The dataset contains data in English.

## Dataset Structure

### Data Instances

Each instance is a (question, long-form answer) pair from one of the four data sources -- ELI5, WebGPT, NQ, and model-generated answers (denoted as ELI5-model) -- and our discourse annotation, which consists of a QA-pair-level validity label and sentence-level functional role labels. We provide all validity and role annotations here. For the train/val/test split, please refer to our [github repository](https://github.com/utcsnlp/lfqa_discourse).

### Data Fields

For validity annotations, each instance contains the following fields:
* `dataset`: The dataset this QA pair belongs to, one of [`NQ`, `ELI5`, `Web-GPT`]. Note that `ELI5` contains both human-written answers and model-generated answers, with model-generated answers distinguished by the `a_id` field mentioned below.
* `q_id`: The question id, same as the original NQ or ELI5 dataset.
* `a_id`: The answer id, same as the original ELI5 dataset. For NQ, we populate a dummy `a_id` (1). For machine-generated answers, this field corresponds to the name of the model.
* `question`: The question.
* `answer_paragraph`: The answer paragraph.
* `answer_sentences`: The list of answer sentences, tokenized from the answer paragraph.
* `is_valid`: A boolean value indicating whether the QA pair is valid, values: [`True`, `False`].
* `invalid_reason`: A list of lists; each list contains the invalid reasons the annotator selected. The invalid reason is one of [`no_valid_answer`, `nonsensical_question`, `assumptions_rejected`, `multiple_questions`].

For role annotations, each instance contains the following fields:
* `dataset`: The dataset this QA pair belongs to, one of [`NQ`, `ELI5`, `Web-GPT`]. Note that `ELI5` contains both human-written answers and model-generated answers, with model-generated answers distinguished by the `a_id` field mentioned below.
* `q_id`: The question id, same as the original NQ or ELI5 dataset.
* `a_id`: The answer id, same as the original ELI5 dataset. For NQ, we populate a dummy `a_id` (1). For machine-generated answers, this field corresponds to the name of the model.
* `question`: The question.
* `answer_paragraph`: The answer paragraph.
* `answer_sentences`: The list of answer sentences, tokenized from the answer paragraph.
* `role_annotation`: The list of majority (or adjudicated) roles, if they exist, for the sentences in `answer_sentences`. Each role is one of [`Answer`, `Answer - Example`, `Answer (Summary)`, `Auxiliary Information`, `Answer - Organizational sentence`, `Miscellaneous`].
* `raw_role_annotation`: A list of lists; each list contains the raw role annotations for sentences in `answer_sentences`.

### Data Splits

For train/validation/test splits, please refer to our [repository](https://github.com/utcsnlp/lfqa_discourse).

## Dataset Creation

Please refer to our [paper](https://arxiv.org/abs/2203.11048) and datasheet for details on dataset creation, the annotation process, and a discussion of limitations.

## Additional Information

### Licensing Information

https://creativecommons.org/licenses/by-sa/4.0/legalcode

### Citation Information

```
@inproceedings{xu2022lfqadiscourse,
  title = {How Do We Answer Complex Questions: Discourse Structure of Long-form Answers},
  author = {Xu, Fangyuan and Li, Junyi Jessy and Choi, Eunsol},
  year = 2022,
  booktitle = {Proceedings of the Annual Meeting of the Association for Computational Linguistics},
  note = {Long paper}
}
```

### Contributions

Thanks to [@carriex](https://github.com/carriex) for adding this dataset.
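Given the fields above, a common first step is to keep only valid pairs and tally the sentence-level roles. A minimal sketch over records shaped like the role-annotation fields (the in-memory list `records` is assumed to have been loaded from the repository's annotation files):

```python
from collections import Counter

def role_distribution(records: list[dict]) -> Counter:
    """Count functional roles across all answer sentences.
    Each record is assumed to carry the role_annotation field described above."""
    counts = Counter()
    for rec in records:
        counts.update(rec["role_annotation"])
    return counts

# records = [...]  # loaded from the annotation files in the repository
# print(role_distribution(records).most_common())
```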
[ -0.45880937576293945, -0.9005054235458374, 0.2519398629665375, 0.06598708778619766, -0.15293918550014496, -0.031617436558008194, 0.02938324399292469, -0.3711242377758026, 0.06065069139003754, 0.6162478923797607, -0.7348118424415588, -0.698878824710846, -0.24042776226997375, 0.2494271248579...
null
null
null
null
null
null
null
null
null
null
null
null
null
DFKI-SLT/scidtb
DFKI-SLT
2022-10-25T06:38:25Z
12
2
null
[ "task_categories:token-classification", "task_ids:parsing", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:unknown", "source_datasets:original", "language:en", "region:us" ]
2022-10-25T06:38:25Z
2022-03-25T09:07:59.000Z
2022-03-25T09:07:59
---
annotations_creators:
- expert-generated
language_creators:
- found
language:
- en
license: []
multilinguality:
- monolingual
size_categories:
- unknown
source_datasets:
- original
task_categories:
- token-classification
task_ids:
- parsing
pretty_name: Scientific Dependency Tree Bank
language_bcp47:
- en-US
---

# Dataset Card for SciDTB

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** https://github.com/PKU-TANGENT/SciDTB
- **Repository:** https://github.com/PKU-TANGENT/SciDTB
- **Paper:** https://aclanthology.org/P18-2071/
- **Leaderboard:** [Needs More Information]
- **Point of Contact:** [Needs More Information]

### Dataset Summary

SciDTB is a domain-specific discourse treebank annotated on scientific articles written in English. Unlike the widely-used RST-DT and PDTB, SciDTB uses dependency trees to represent discourse structure, which is flexible and simplified to some extent but does not sacrifice structural integrity. Furthermore, this treebank is made as a benchmark for evaluating discourse dependency parsers. This dataset can benefit many downstream NLP tasks such as machine translation and automatic summarization.

### Supported Tasks and Leaderboards

[Needs More Information]

### Languages

English.

## Dataset Structure

### Data Instances

A typical data point consists of `root`, which is a list of nodes in a dependency tree. Each node in the list has four fields: `id` containing the id for the node, `parent` containing the id of the parent node, `text` referring to the span that is part of the current node, and finally `relation` representing the relation between the current node and its parent node. An example from the SciDTB train set is given below:

```
{
  "root": [
    {"id": 0, "parent": -1, "text": "ROOT", "relation": "null"},
    {"id": 1, "parent": 0, "text": "We propose a neural network approach ", "relation": "ROOT"},
    {"id": 2, "parent": 1, "text": "to benefit from the non-linearity of corpus-wide statistics for part-of-speech ( POS ) tagging . <S>", "relation": "enablement"},
    {"id": 3, "parent": 1, "text": "We investigated several types of corpus-wide information for the words , such as word embeddings and POS tag distributions . <S>", "relation": "elab-aspect"},
    {"id": 4, "parent": 5, "text": "Since these statistics are encoded as dense continuous features , ", "relation": "cause"},
    {"id": 5, "parent": 3, "text": "it is not trivial to combine these features ", "relation": "elab-addition"},
    {"id": 6, "parent": 5, "text": "comparing with sparse discrete features . <S>", "relation": "comparison"},
    {"id": 7, "parent": 1, "text": "Our tagger is designed as a combination of a linear model for discrete features and a feed-forward neural network ", "relation": "elab-aspect"},
    {"id": 8, "parent": 7, "text": "that captures the non-linear interactions among the continuous features . <S>", "relation": "elab-addition"},
    {"id": 9, "parent": 10, "text": "By using several recent advances in the activation functions for neural networks , ", "relation": "manner-means"},
    {"id": 10, "parent": 1, "text": "the proposed method marks new state-of-the-art accuracies for English POS tagging tasks . <S>", "relation": "evaluation"}
  ]
}
```

More raw data instances can be found [here](https://github.com/PKU-TANGENT/SciDTB/tree/master/dataset).

### Data Fields

- id: an integer identifier for the node
- parent: an integer identifier for the parent node
- text: a string containing text for the current node
- relation: a string representing the discourse relation between the current node and the parent node

### Data Splits

The dataset consists of three splits: `train`, `dev` and `test`.

| Train | Valid | Test |
| ----- | ----- | ---- |
|   743 |   154 |  152 |

## Dataset Creation

### Curation Rationale

[Needs More Information]

### Source Data

#### Initial Data Collection and Normalization

[Needs More Information]

#### Who are the source language producers?

[Needs More Information]

### Annotations

#### Annotation process

More information can be found [here](https://aclanthology.org/P18-2071/)

#### Who are the annotators?

[Needs More Information]

### Personal and Sensitive Information

[Needs More Information]

## Considerations for Using the Data

### Social Impact of Dataset

[Needs More Information]

### Discussion of Biases

[Needs More Information]

### Other Known Limitations

[Needs More Information]

## Additional Information

### Dataset Curators

[Needs More Information]

### Licensing Information

[Needs More Information]

### Citation Information

```
@inproceedings{yang-li-2018-scidtb,
    title = "{S}ci{DTB}: Discourse Dependency {T}ree{B}ank for Scientific Abstracts",
    author = "Yang, An and Li, Sujian",
    booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P18-2071",
    doi = "10.18653/v1/P18-2071",
    pages = "444--449",
    abstract = "Annotation corpus for discourse relations benefits NLP tasks such as machine translation and question answering. In this paper, we present SciDTB, a domain-specific discourse treebank annotated on scientific articles. Different from widely-used RST-DT and PDTB, SciDTB uses dependency trees to represent discourse structure, which is flexible and simplified to some extent but do not sacrifice structural integrity. We discuss the labeling framework, annotation workflow and some statistics about SciDTB. Furthermore, our treebank is made as a benchmark for evaluating discourse dependency parsers, on which we provide several baselines as fundamental work.",
}
```
[ -0.2734401822090149, -0.6113628149032593, 0.10679953545331955, 0.4083024263381958, -0.3376411199569702, 0.1340312659740448, -0.16148175299167633, -0.3349936008453369, 0.5318055152893066, 0.1009790301322937, -0.3396618366241455, -0.7750012278556824, -0.5290021896362305, 0.1522464007139206, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
Splend1dchan/NMSQA_w2v2-st-ft
Splend1dchan
2022-03-29T00:28:54Z
12
0
null
[ "region:us" ]
2022-03-29T00:28:54Z
2022-03-26T07:05:59.000Z
2022-03-26T07:05:59
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
Splend1dchan/NMSQA_w2v2-st-ft2
Splend1dchan
2022-03-30T11:50:39Z
12
0
null
[ "region:us" ]
2022-03-30T11:50:39Z
2022-03-29T02:02:02.000Z
2022-03-29T02:02:02
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
Pavithra/sampled-code-parrot-ds-valid
Pavithra
2022-03-29T03:50:34Z
12
0
null
[ "region:us" ]
2022-03-29T03:50:34Z
2022-03-29T03:50:31.000Z
2022-03-29T03:50:31
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
oscarfossey/NLP_Pole_emploi
oscarfossey
2022-04-01T18:05:24Z
12
0
null
[ "region:us" ]
2022-04-01T18:05:24Z
2022-03-30T22:50:01.000Z
2022-03-30T22:50:01
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
israel/Amharic-News-Text-classification-Dataset
israel
2022-04-06T09:27:52Z
12
0
null
[ "license:cc-by-4.0", "arxiv:2103.05639", "region:us" ]
2022-04-06T09:27:52Z
2022-04-06T09:20:35.000Z
2022-04-06T09:20:35
--- license: cc-by-4.0 --- # An Amharic News Text classification Dataset > In NLP, text classification is one of the primary problems we try to solve and its uses in language analyses are indisputable. The lack of labeled training data makes these tasks harder in low-resource languages like Amharic. The task of collecting, labeling, annotating, and making this kind of data valuable will encourage junior researchers, schools, and machine learning practitioners to implement existing classification models in their language. In this short paper, we aim to introduce the Amharic text classification dataset that consists of more than 50k news articles that were categorized into 6 classes. This dataset is made available with simple baseline results to encourage further studies and better-performing experiments. ``` @misc{https://doi.org/10.48550/arxiv.2103.05639, doi = {10.48550/ARXIV.2103.05639}, url = {https://arxiv.org/abs/2103.05639}, author = {Azime, Israel Abebe and Mohammed, Nebil}, keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {An Amharic News Text classification Dataset}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
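For quick experimentation, the dataset can presumably be loaded directly from the Hub under the repository id shown above; the sketch below hedges on the details, since the card does not document split or column names (both are assumptions to verify by inspecting the loaded object).

```python
from datasets import load_dataset

# Repository id taken from this card; splits and column names are not
# documented here, so inspect the loaded object before relying on them.
dataset = load_dataset("israel/Amharic-News-Text-classification-Dataset")
print(dataset)

# Hypothetical access once the column names are confirmed:
# example = dataset["train"][0]
# print(example["article"][:200], "->", example["category"])
```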
[ -0.2836627662181854, -0.5852522253990173, -0.25069382786750793, 0.20923015475273132, -0.1909715235233307, 0.3993585407733917, -0.3262786269187927, -0.6360543966293335, 0.16476231813430786, 0.36546680331230164, 0.06387705355882645, -0.8459925055503845, -0.7164035439491272, 0.314541906118392...
null
null
null
null
null
null
null
null
null
null
null
null
null
lulul/liludataset
lulul
2022-04-06T09:24:42Z
12
0
null
[ "region:us" ]
2022-04-06T09:24:42Z
2022-04-06T09:22:48.000Z
2022-04-06T09:22:48
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuxiangwang/flat_relation
yuxiangwang
2022-04-08T00:15:10Z
12
0
null
[ "region:us" ]
2022-04-08T00:15:10Z
2022-04-07T11:16:06.000Z
2022-04-07T11:16:06
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
NLPC-UOM/Sinhala-News-Category-classification
NLPC-UOM
2022-10-25T10:03:58Z
12
0
null
[ "task_categories:text-classification", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:si", "license:mit", "region:us" ]
2022-10-25T10:03:58Z
2022-04-07T12:21:01.000Z
2022-04-07T12:21:01
--- annotations_creators: [] language_creators: - crowdsourced language: - si license: - mit multilinguality: - monolingual pretty_name: sinhala-news-category-classification size_categories: - 1K<n<10K source_datasets: [] task_categories: - text-classification task_ids: [] --- This file contains news texts (sentences) belonging to 5 different news categories (political, business, technology, sports, and entertainment). The original dataset was released by Nisansa de Silva (*Sinhala Text Classification: Observations from the Perspective of a Resource Poor Language, 2015*). The original dataset is processed and cleaned of single-word texts, English-only sentences, etc. If you use this dataset, please cite {*Nisansa de Silva, Sinhala Text Classification: Observations from the Perspective of a Resource Poor Language, 2015*} and {*Dhananjaya et al. BERTifying Sinhala - A Comprehensive Analysis of Pre-trained Language Models for Sinhala Text Classification, 2022*}
[ 0.047764502465724945, -0.626207172870636, -0.030690982937812805, 0.4575716555118561, -0.7010626196861267, 0.10409022867679596, -0.19435980916023254, -0.29576629400253296, 0.30169257521629333, 0.9032071232795715, -0.5078914761543274, -0.30669084191322327, -0.4229836165904999, 0.380574673414...
null
null
null
null
null
null
null
null
null
null
null
null
null
Splend1dchan/NMSQA_wav2vec2-large-960h-lv60-self
Splend1dchan
2022-06-23T18:22:11Z
12
0
null
[ "region:us" ]
2022-06-23T18:22:11Z
2022-04-12T07:32:40.000Z
2022-04-12T07:32:40
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
mteb/cqadupstack-retrieval
mteb
2022-04-12T17:28:40Z
12
0
null
[ "region:us" ]
2022-04-12T17:28:40Z
2022-04-12T17:20:07.000Z
2022-04-12T17:20:07
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
huggingnft/azuki
huggingnft
2022-04-16T17:59:08Z
12
1
null
[ "license:mit", "huggingnft", "nft", "huggan", "gan", "image", "images", "region:us" ]
2022-04-16T17:59:08Z
2022-04-14T20:36:39.000Z
2022-04-14T20:36:39
--- tags: - huggingnft - nft - huggan - gan - image - images task: - unconditional-image-generation datasets: - huggingnft/azuki license: mit --- # Dataset Card ## Disclaimer All rights belong to their owners. Models and datasets can be removed from the site at the request of the copyright holder. ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [How to use](#how-to-use) - [Dataset Structure](#dataset-structure) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [About](#about) ## Dataset Description - **Homepage:** [https://github.com/AlekseyKorshuk/huggingnft](https://github.com/AlekseyKorshuk/huggingnft) - **Repository:** [https://github.com/AlekseyKorshuk/huggingnft](https://github.com/AlekseyKorshuk/huggingnft) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Dataset Summary NFT images dataset for unconditional generation. NFT collection available [here](https://opensea.io/collection/azuki). Model is available [here](https://huggingface.co/huggingnft/azuki). Check Space: [link](https://huggingface.co/spaces/AlekseyKorshuk/huggingnft). ### Supported Tasks and Leaderboards [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## How to use How to load this dataset directly with the datasets library: ```python from datasets import load_dataset dataset = load_dataset("huggingnft/azuki") ``` ## Dataset Structure [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Data Fields The data fields are the same among all splits. - `image`: an `image` feature. - `id`: an `int` feature. - `token_metadata`: a `str` feature. - `image_original_url`: a `str` feature. ### Data Splits [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the source language producers? 
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Annotations #### Annotation process [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the annotators? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Citation Information ``` @InProceedings{huggingnft, author={Aleksey Korshuk} year=2022 } ``` ## About *Built by Aleksey Korshuk* [![Follow](https://img.shields.io/github/followers/AlekseyKorshuk?style=social)](https://github.com/AlekseyKorshuk) [![Follow](https://img.shields.io/twitter/follow/alekseykorshuk?style=social)](https://twitter.com/intent/follow?screen_name=alekseykorshuk) [![Follow](https://img.shields.io/badge/dynamic/json?color=blue&label=Telegram%20Channel&query=%24.result&url=https%3A%2F%2Fapi.telegram.org%2Fbot1929545866%3AAAFGhV-KKnegEcLiyYJxsc4zV6C-bdPEBtQ%2FgetChatMemberCount%3Fchat_id%3D-1001253621662&style=social&logo=telegram)](https://t.me/joinchat/_CQ04KjcJ-4yZTky) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/AlekseyKorshuk/huggingnft?style=social)](https://github.com/AlekseyKorshuk/huggingnft)
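Building on the loading snippet and data fields listed above, here is a short, hedged sketch of inspecting a single record; the `train` split name is an assumption, since the card leaves the split layout unspecified.

```python
from datasets import load_dataset

dataset = load_dataset("huggingnft/azuki")
print(dataset)  # confirm the split layout; "train" below is an assumption

record = dataset["train"][0]
print(record["id"], record["image_original_url"])
record["image"].save("azuki_sample.png")  # the image feature decodes to a PIL image
```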
[ -0.6822050213813782, -0.6293134093284607, 0.14685149490833282, 0.3106032907962799, -0.3816165328025818, 0.10254693776369095, -0.18400253355503082, -0.5848355293273926, 0.8266411423683167, 0.4066658914089203, -0.912889838218689, -0.9181192517280579, -0.6241387128829956, 0.08672837167978287,...
null
null
null
null
null
null
null
null
null
null
null
null
null
dl4phys/top_tagging_images
dl4phys
2022-04-17T10:33:58Z
12
0
null
[ "license:cc-by-4.0", "region:us" ]
2022-04-17T10:33:58Z
2022-04-16T13:08:03.000Z
2022-04-16T13:08:03
--- license: cc-by-4.0 ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
NLPC-UOM/Writing-style-classification
NLPC-UOM
2022-10-25T10:12:46Z
12
0
null
[ "task_categories:text-classification", "language_creators:crowdsourced", "multilinguality:monolingual", "language:si", "license:mit", "region:us" ]
2022-10-25T10:12:46Z
2022-04-27T18:08:07.000Z
2022-04-27T18:08:07
--- annotations_creators: [] language_creators: - crowdsourced language: - si license: - mit multilinguality: - monolingual pretty_name: sinhala-writing-style-classification size_categories: [] source_datasets: [] task_categories: - text-classification task_ids: [] --- This file contains news texts (sentences) belonging to different writing styles. The original dataset created by {*Upeksha, D., Wijayarathna, C., Siriwardena, M., Lasandun, L., Wimalasuriya, C., de Silva, N., and Dias, G. (2015). Implementing a corpus for Sinhala language. 01*} is processed and cleaned. If you use this dataset, please cite {*Dhananjaya et al. BERTifying Sinhala - A Comprehensive Analysis of Pre-trained Language Models for Sinhala Text Classification, 2022*} and the above-mentioned paper.
[ 0.09868428111076355, -0.7683289647102356, 0.10333435982465744, 0.4822775423526764, -0.5820792317390442, 0.005660847760736942, -0.5684820413589478, -0.2642841339111328, 0.5189138650894165, 0.919899582862854, -0.6223441958427429, -0.42133021354675293, -0.38672205805778503, 0.4392470419406891...
null
null
null
null
null
null
null
null
null
null
null
null
null
lightonai/SwissProt-EC-leaf
lightonai
2022-05-05T09:54:42Z
12
0
null
[ "Protein", "Enzyme Commission", "region:us" ]
2022-05-05T09:54:42Z
2022-04-30T15:14:51.000Z
2022-04-30T15:14:51
--- language: - protein sequences datasets: - Swissprot tags: - Protein - Enzyme Commission --- # Dataset Swissprot is a high-quality, manually annotated protein database. The dataset contains annotations with the functional properties of the proteins. Here we extract proteins with Enzyme Commission labels. The dataset is ported from ProteInfer: https://github.com/google-research/proteinfer. The leaf-level EC labels are extracted and indexed; the mapping is provided in `idx_mapping.json`. Proteins without leaf-level EC tags are removed. ## Example The protein Q87BZ2 has the following EC tags. EC:2.-.-.- (Transferases) EC:2.7.-.- (Transferring phosphorus-containing groups) EC:2.7.1.- (Phosphotransferases with an alcohol group as acceptor) EC:2.7.1.30 (Glycerol kinase) We only extract the leaf-level labels, here EC:2.7.1.30, corresponding to glycerol kinase.
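The leaf/intermediate distinction above is easy to operationalize: an EC tag is a leaf only when all four positions are specified, i.e. none of them is a dash. A minimal sketch follows, assuming tags use the `EC:a.b.c.d` format shown in the example (the `tags` list is illustrative, not read from the dataset).

```python
def is_leaf_ec(tag: str) -> bool:
    # A leaf-level EC number has all four positions specified (no "-").
    levels = tag.removeprefix("EC:").split(".")  # removeprefix needs Python 3.9+
    return len(levels) == 4 and "-" not in levels

# Illustrative tags taken from the Q87BZ2 example above.
tags = ["EC:2.-.-.-", "EC:2.7.-.-", "EC:2.7.1.-", "EC:2.7.1.30"]
print([t for t in tags if is_leaf_ec(t)])  # ['EC:2.7.1.30'] -> glycerol kinase
```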
[ -0.49618884921073914, -0.3733765780925751, 0.30377066135406494, -0.16320215165615082, -0.24641677737236023, 0.08243261277675629, 0.26556238532066345, -0.4361589252948761, 0.5708795785903931, 0.7607208490371704, -0.854047954082489, -0.8972299098968506, -0.773614227771759, 0.4647203683853149...
null
null
null
null
null
null
null
null
null
null
null
null
null
Fhrozen/FSD50k
Fhrozen
2022-05-27T08:50:25Z
12
1
null
[ "task_categories:audio-classification", "annotations_creators:unknown", "language_creators:unknown", "size_categories:10K<n<100K", "source_datasets:unknown", "license:cc-by-4.0", "arxiv:2010.00475", "region:us" ]
2022-05-27T08:50:25Z
2022-05-06T08:51:56.000Z
2022-05-06T08:51:56
--- license: cc-by-4.0 annotations_creators: - unknown language_creators: - unknown size_categories: - 10K<n<100K source_datasets: - unknown task_categories: - audio-classification task_ids: - other-audio-slot-filling --- # Freesound Dataset 50k (FSD50K) ## Important **This data set is a copy from the original one located at Zenodo.** ## Dataset Description - **Homepage:** [FSD50K](https://zenodo.org/record/4060432) - **Repository:** [GitHub](https://github.com/edufonseca/FSD50K_baseline) - **Paper:** [FSD50K: An Open Dataset of Human-Labeled Sound Events](https://arxiv.org/abs/2010.00475) - **Leaderboard:** [Paperswithcode Leaderboard](https://paperswithcode.com/dataset/fsd50k) ## Citation If you use the FSD50K dataset, or part of it, please cite our paper: >Eduardo Fonseca, Xavier Favory, Jordi Pons, Frederic Font, Xavier Serra. "FSD50K: an Open Dataset of Human-Labeled Sound Events", arXiv 2020. ### Data curators Eduardo Fonseca, Xavier Favory, Jordi Pons, Mercedes Collado, Ceren Can, Rachit Gupta, Javier Arredondo, Gary Avendano and Sara Fernandez ### Contact You are welcome to contact Eduardo Fonseca should you have any questions at eduardo.fonseca@upf.edu. ## About FSD50K Freesound Dataset 50k (or **FSD50K** for short) is an open dataset of human-labeled sound events containing 51,197 <a href="https://freesound.org/">Freesound</a> clips unequally distributed in 200 classes drawn from the <a href="https://research.google.com/audioset/ontology/index.html">AudioSet Ontology</a> [1]. FSD50K has been created at the <a href="https://www.upf.edu/web/mtg">Music Technology Group of Universitat Pompeu Fabra</a>. What follows is a brief summary of FSD50K's most important characteristics. Please have a look at our paper (especially Section 4) to extend the basic information provided here with relevant details for its usage, as well as discussion, limitations, applications and more. **Basic characteristics:** - FSD50K is composed mainly of sound events produced by physical sound sources and production mechanisms. - Following AudioSet Ontology’s main families, the FSD50K vocabulary encompasses mainly *Human sounds*, *Sounds of things*, *Animal*, *Natural sounds* and *Music*. - The dataset has 200 sound classes (144 leaf nodes and 56 intermediate nodes) hierarchically organized with a subset of the AudioSet Ontology. The vocabulary can be inspected in `vocabulary.csv` (see Files section below). - FSD50K contains 51,197 audio clips totalling 108.3 hours of audio. - The audio content has been manually labeled by humans following a data labeling process using the <a href="https://annotator.freesound.org/">Freesound Annotator</a> platform [2]. - Clips are of variable length from 0.3 to 30s, due to the diversity of the sound classes and the preferences of Freesound users when recording sounds. - Ground truth labels are provided at the clip-level (i.e., weak labels). - The dataset poses mainly a multi-label sound event classification problem (but also allows a variety of sound event research tasks, see Sec. 4D). - All clips are provided as uncompressed PCM 16 bit 44.1 kHz mono audio files. - The audio clips are grouped into a development (*dev*) set and an evaluation (*eval*) set such that they do not have clips from the same Freesound uploader. 
**Dev set:** - 40,966 audio clips totalling 80.4 hours of audio - Avg duration/clip: 7.1s - 114,271 smeared labels (i.e., labels propagated in the upwards direction to the root of the ontology) - Labels are correct but could be occasionally incomplete - A train/validation split is provided (Sec. 3H). If a different split is used, it should be specified for reproducibility and fair comparability of results (see Sec. 5C of our paper) **Eval set:** - 10,231 audio clips totalling 27.9 hours of audio - Avg duration/clip: 9.8s - 38,596 smeared labels - Eval set is labeled exhaustively (labels are correct and complete for the considered vocabulary) **NOTE:** All classes in FSD50K are represented in AudioSet, except `Crash cymbal`, `Human group actions`, `Human voice`, `Respiratory sounds`, and `Domestic sounds, home sounds`. ## License All audio clips in FSD50K are released under Creative Commons (CC) licenses. Each clip has its own license as defined by the clip uploader in Freesound, some of them requiring attribution to their original authors and some forbidding further commercial reuse. For attribution purposes and to facilitate attribution of these files to third parties, we include a mapping from the audio clips to their corresponding licenses. The licenses are specified in the files `dev_clips_info_FSD50K.json` and `eval_clips_info_FSD50K.json`. These licenses are CC0, CC-BY, CC-BY-NC and CC Sampling+. In addition, FSD50K as a whole is the result of a curation process and it has an additional license: FSD50K is released under <a href="https://creativecommons.org/licenses/by/4.0/">CC-BY</a>. This license is specified in the `LICENSE-DATASET` file downloaded with the `FSD50K.doc` zip file. ## Files FSD50K can be downloaded as a series of zip files with the following directory structure: <div class="highlight"><pre><span></span>root │ └───clips/ Audio clips │ │ │ └─── dev/ Audio clips in the dev set │ │ │ └─── eval/ Audio clips in the eval set │ └───labels/ Files for FSD50K's ground truth │ │ │ └─── dev.csv Ground truth for the dev set │ │ │ └─── eval.csv Ground truth for the eval set │ │ │ └─── vocabulary.csv List of 200 sound classes in FSD50K │ └───metadata/ Files for additional metadata │ │ │ └─── class_info_FSD50K.json Metadata about the sound classes │ │ │ └─── dev_clips_info_FSD50K.json Metadata about the dev clips │ │ │ └─── eval_clips_info_FSD50K.json Metadata about the eval clips │ │ │ └─── pp_pnp_ratings_FSD50K.json PP/PNP ratings │ │ │ └─── collection/ Files for the *sound collection* format │ │ └───README.md The dataset description file that you are reading │ └───LICENSE-DATASET License of the FSD50K dataset as an entity </pre></div> Each row (i.e. audio clip) of `dev.csv` contains the following information: - `fname`: the file name without the `.wav` extension, e.g., the fname `64760` corresponds to the file `64760.wav` in disk. This number is the Freesound id. We always use Freesound ids as filenames. - `labels`: the class labels (i.e., the ground truth). Note these class labels are *smeared*, i.e., the labels have been propagated in the upwards direction to the root of the ontology. More details about the label smearing process can be found in Appendix D of our paper. 
- `mids`: the Freebase identifiers corresponding to the class labels, as defined in the <a href="https://github.com/audioset/ontology/blob/master/ontology.json">AudioSet Ontology specification</a> - `split`: whether the clip belongs to *train* or *val* (see paper for details on the proposed split) Rows in `eval.csv` follow the same format, except that there is no `split` column. **NOTE:** We use a slightly different format than AudioSet for the naming of class labels in order to avoid potential problems with spaces, commas, etc. Example: we use `Accelerating_and_revving_and_vroom` instead of the original `Accelerating, revving, vroom`. You can go back to the original AudioSet naming using the information provided in `vocabulary.csv` (class label and mid for the 200 classes of FSD50K) and the <a href="https://github.com/audioset/ontology/blob/master/ontology.json">AudioSet Ontology specification</a>. ### Files with additional metadata (metadata/) To allow a variety of analyses and approaches with FSD50K, we provide the following metadata: 1. `class_info_FSD50K.json`: python dictionary where each entry corresponds to one sound class and contains: `FAQs` utilized during the annotation of the class, `examples` (representative audio clips), and `verification_examples` (audio clips presented to raters during annotation as a quality control mechanism). Audio clips are described by the Freesound id. **NOTE:** It may be that some of these examples are not included in the FSD50K release. 2. `dev_clips_info_FSD50K.json`: python dictionary where each entry corresponds to one dev clip and contains: title, description, tags, clip license, and the uploader name. All these metadata are provided by the uploader. 3. `eval_clips_info_FSD50K.json`: same as before, but with eval clips. 4. `pp_pnp_ratings.json`: python dictionary where each entry corresponds to one clip in the dataset and contains the PP/PNP ratings for the labels associated with the clip. More specifically, these ratings are gathered for the labels validated in **the validation task** (Sec. 3 of paper). This file includes 59,485 labels for the 51,197 clips in FSD50K. Out of these labels: - 56,095 labels have inter-annotator agreement (PP twice, or PNP twice). Each of these combinations can be occasionally accompanied by other (non-positive) ratings. - 3390 labels feature other rating configurations such as *i)* only one PP rating and one PNP rating (and nothing else). This can be considered inter-annotator agreement at the "Present" level; *ii)* only one PP rating (and nothing else); *iii)* only one PNP rating (and nothing else). Ratings' legend: PP=1; PNP=0.5; U=0; NP=-1. **NOTE:** The PP/PNP ratings have been provided in the *validation* task. Subsequently, a subset of these clips corresponding to the eval set was exhaustively labeled in the *refinement* task, hence receiving additional labels in many cases. For these eval clips, you might want to check their labels in `eval.csv` in order to have more info about their audio content (see Sec. 3 for details). 5. `collection/`: This folder contains metadata for what we call the ***sound collection format***. This format consists of the raw annotations gathered, featuring all generated class labels without any restriction. We provide the *collection* format to make available some annotations that do not appear in the FSD50K *ground truth* release. 
This typically happens in the case of classes for which we gathered human-provided annotations, but that were discarded in the FSD50K release due to data scarcity (more specifically, they were merged with their parents). In other words, the main purpose of the `collection` format is to make available annotations for tiny classes. The format of these files is analogous to that of the files in `FSD50K.ground_truth/`. A couple of examples show the differences between **collection** and **ground truth** formats: `clip`: `labels_in_collection` -- `labels_in_ground_truth` `51690`: `Owl` -- `Bird,Wild_Animal,Animal` `190579`: `Toothbrush,Electric_toothbrush` -- `Domestic_sounds_and_home_sounds` In the first example, raters provided the label `Owl`. However, due to data scarcity, `Owl` labels were merged into their parent `Bird`. Then, labels `Wild_Animal,Animal` were added via label propagation (smearing). The second example shows one of the most extreme cases, where raters provided the labels `Electric_toothbrush,Toothbrush`, which both had little data. Hence, they were merged into Toothbrush's parent, which unfortunately is `Domestic_sounds_and_home_sounds` (a rather vague class containing a variety of child sound classes). **NOTE:** Labels in the collection format are not smeared. **NOTE:** While in FSD50K's ground truth the vocabulary encompasses 200 classes (common for dev and eval), since the *collection* format is composed of raw annotations, the vocabulary here is much larger (over 350 classes), and it is slightly different in dev and eval. For further questions, please contact eduardo.fonseca@upf.edu, or join the <a href="https://groups.google.com/g/freesound-annotator">freesound-annotator Google Group</a>. ## Download Clone this repository: ``` git clone https://huggingface.co/Fhrozen/FSD50k ``` ## Baseline System Several baseline systems for FSD50K are available at <a href="https://github.com/edufonseca/FSD50K_baseline">https://github.com/edufonseca/FSD50K_baseline</a>. The experiments are described in Sec 5 of our paper. ## References and links [1] Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. "Audio set: An ontology and human-labeled dataset for audio events." In Proceedings of the International Conference on Acoustics, Speech and Signal Processing, 2017. [<a href="https://ai.google/research/pubs/pub45857">PDF</a>] [2] Eduardo Fonseca, Jordi Pons, Xavier Favory, Frederic Font, Dmitry Bogdanov, Andres Ferraro, Sergio Oramas, Alastair Porter, and Xavier Serra. "Freesound Datasets: A Platform for the Creation of Open Audio Datasets." In Proceedings of the International Conference on Music Information Retrieval, 2017. 
[<a href="https://repositori.upf.edu/bitstream/handle/10230/33299/fonseca_ismir17_freesound.pdf">PDF</a>] Companion site for FSD50K: <a href="https://annotator.freesound.org/fsd/release/FSD50K/">https://annotator.freesound.org/fsd/release/FSD50K/</a> Freesound Annotator: <a href="https://annotator.freesound.org/">https://annotator.freesound.org/</a> Freesound: <a href="https://freesound.org">https://freesound.org</a> Eduardo Fonseca's personal website: <a href="http://www.eduardofonseca.net/">http://www.eduardofonseca.net/</a> More datasets collected by us: <a href="http://www.eduardofonseca.net/datasets/">http://www.eduardofonseca.net/datasets/</a> ## Acknowledgments The authors would like to thank everyone who contributed to FSD50K with annotations, and especially Mercedes Collado, Ceren Can, Rachit Gupta, Javier Arredondo, Gary Avendano and Sara Fernandez for their commitment and perseverance. The authors would also like to thank Daniel P.W. Ellis and Manoj Plakal from Google Research for valuable discussions. This work is partially supported by the European Union’s Horizon 2020 research and innovation programme under grant agreement No 688382 <a href="https://www.audiocommons.org/">AudioCommons</a>, and two Google Faculty Research Awards <a href="https://ai.googleblog.com/2018/03/google-faculty-research-awards-2017.html">2017</a> and <a href="https://ai.googleblog.com/2019/03/google-faculty-research-awards-2018.html">2018</a>, and the Maria de Maeztu Units of Excellence Programme (MDM-2015-0502).
[ -0.6206883788108826, -0.07139652222394943, 0.188967764377594, 0.19172713160514832, -0.2610682249069214, -0.09601061046123505, -0.41267403960227966, -0.48952361941337585, 0.5161257386207581, 0.507869303226471, -0.929392397403717, -0.8172128796577454, -0.25983726978302, 0.03962727263569832, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
SetFit/amazon_massive_scenario_bn-BD
SetFit
2022-05-06T08:59:30Z
12
0
null
[ "region:us" ]
2022-05-06T08:59:30Z
2022-05-06T08:59:27.000Z
2022-05-06T08:59:27
Entry not found
[ -0.32276469469070435, -0.22568407654762268, 0.8622258901596069, 0.434614896774292, -0.5282987952232361, 0.7012966275215149, 0.7915717363357544, 0.07618635147809982, 0.7746022939682007, 0.25632190704345703, -0.7852814793586731, -0.22573821246623993, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
ufukhaman/uspto_balanced_200k_ipc_classification
ufukhaman
2023-11-20T03:16:38Z
12
0
null
[ "task_categories:text-classification", "task_ids:topic-classification", "annotations_creators:USPTO", "size_categories:100K<n<1M", "source_datasets:USPTO", "language:en", "license:mit", "patent", "refined_patents", "patent classification", "uspto", "ipc", "region:us" ]
2023-11-20T03:16:38Z
2022-05-08T16:50:41.000Z
2022-05-08T16:50:41
--- annotations_creators: - USPTO language: - en license: - mit pretty_name: uspto_balanced_filtered_200k_ipc_patents size_categories: - 100K<n<1M source_datasets: - USPTO tags: - patent - refined_patents - patent classification - uspto - ipc task_categories: - text-classification task_ids: - topic-classification ---
[ -0.128533735871315, -0.18616747856140137, 0.6529128551483154, 0.4943627715110779, -0.19319336116313934, 0.2360745221376419, 0.3607197701931, 0.05056330934166908, 0.5793653130531311, 0.740013837814331, -0.6508103013038635, -0.23783954977989197, -0.7102248668670654, -0.04782583937048912, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
kejian/pile-severetoxic-balanced
kejian
2022-05-09T04:32:05Z
12
0
null
[ "region:us" ]
2022-05-09T04:32:05Z
2022-05-09T04:31:31.000Z
2022-05-09T04:31:31
Entry not found
[ -0.32276469469070435, -0.22568407654762268, 0.8622258901596069, 0.434614896774292, -0.5282987952232361, 0.7012966275215149, 0.7915717363357544, 0.07618635147809982, 0.7746022939682007, 0.25632190704345703, -0.7852814793586731, -0.22573821246623993, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
milesbutler/consumer_complaints
milesbutler
2022-05-09T21:27:44Z
12
0
null
[ "license:mit", "region:us" ]
2022-05-09T21:27:44Z
2022-05-09T21:21:32.000Z
2022-05-09T21:21:32
--- license: mit --- This dataset is from Kaggle. It originally comes from the US Consumer Finance Complaints. This is a great dataset for NLP multi-class classification.
[ -0.19501937925815582, -0.546637773513794, -0.22371110320091248, 0.19241295754909515, 0.2986895442008972, 0.30316704511642456, -0.06477750092744827, -0.6907111406326294, 0.2843121588230133, 0.7481766939163208, -0.5741304159164429, -0.4534358084201813, -0.4301062226295471, -0.265856415033340...
null
null
null
null
null
null
null
null
null
null
null
null
null
bigscience-data/roots_indic-hi_iitb_english_hindi_corpus
bigscience-data
2022-12-12T11:08:44Z
12
0
null
[ "language:hi", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-12-12T11:08:44Z
2022-05-18T09:16:01.000Z
2022-05-18T09:16:01
--- language: hi license: cc-by-nc-sa-4.0 extra_gated_prompt: 'By accessing this dataset, you agree to abide by the BigScience Ethical Charter. The charter can be found at: https://hf.co/spaces/bigscience/ethical-charter' extra_gated_fields: I have read and agree to abide by the BigScience Ethical Charter: checkbox --- ROOTS Subset: roots_indic-hi_iitb_english_hindi_corpus # IITB English-Hindi Corpus - Dataset uid: `iitb_english_hindi_corpus` ### Description The IIT Bombay English-Hindi corpus contains a parallel English-Hindi corpus as well as a monolingual Hindi corpus collected from a variety of existing sources and corpora developed at the Center for Indian Language Technology, IIT Bombay over the years. This corpus has been used at the Workshop on Asian Language Translation Shared Task since 2016 for the Hindi-to-English and English-to-Hindi language pairs and as a pivot language pair for the Hindi-to-Japanese and Japanese-to-Hindi language pairs. ### Homepage https://www.cfilt.iitb.ac.in/iitb_parallel/ ### Licensing - non-commercial use - cc-by-nc-nd-4.0: Creative Commons Attribution Non Commercial No Derivatives 4.0 International ### Speaker Locations - Southern Asia - India - Pakistan ### Sizes - 0.6512 % of total - 28.5802 % of indic-hi ### BigScience processing steps #### Filters applied to: indic-hi - dedup_document - dedup_template_soft - filter_remove_empty_docs - filter_small_docs_bytes_300
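Since access is gated behind the checkbox above, loading this subset needs an authenticated session; below is a minimal sketch, assuming the gating terms were already accepted on the Hub and a token was stored via `huggingface-cli login` (in recent versions of `datasets`, `token=True` reuses that stored credential; older versions used `use_auth_token=True`).

```python
from datasets import load_dataset

# Requires accepting the gating terms on the dataset page and a prior
# `huggingface-cli login`; otherwise this call raises an authorization error.
dataset = load_dataset(
    "bigscience-data/roots_indic-hi_iitb_english_hindi_corpus",
    token=True,  # use_auth_token=True on older versions of `datasets`
)
print(dataset)
```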
[ -0.5238705277442932, -0.501654326915741, 0.04703569784760475, 0.6135655641555786, -0.3757167160511017, 0.5125410556793213, -0.3754207193851471, -0.12310531735420227, 0.4284205138683319, 0.15225857496261597, -0.653892993927002, -0.5048021674156189, -0.6718685626983643, 0.5868086814880371, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
bigscience-data/roots_pt_wikipedia
bigscience-data
2022-12-12T11:15:43Z
12
0
null
[ "language:pt", "license:cc-by-sa-3.0", "region:us" ]
2022-12-12T11:15:43Z
2022-05-18T09:19:00.000Z
2022-05-18T09:19:00
--- language: pt license: cc-by-sa-3.0 extra_gated_prompt: 'By accessing this dataset, you agree to abide by the BigScience Ethical Charter. The charter can be found at: https://hf.co/spaces/bigscience/ethical-charter' extra_gated_fields: I have read and agree to abide by the BigScience Ethical Charter: checkbox --- ROOTS Subset: roots_pt_wikipedia # wikipedia - Dataset uid: `wikipedia` ### Description ### Homepage ### Licensing ### Speaker Locations ### Sizes - 3.2299 % of total - 4.2071 % of en - 5.6773 % of ar - 3.3416 % of fr - 5.2815 % of es - 12.4852 % of ca - 0.4288 % of zh - 0.4286 % of zh - 5.4743 % of indic-bn - 8.9062 % of indic-ta - 21.3313 % of indic-te - 4.4845 % of pt - 4.0493 % of indic-hi - 11.3163 % of indic-ml - 22.5300 % of indic-ur - 4.4902 % of vi - 16.9916 % of indic-kn - 24.7820 % of eu - 11.6241 % of indic-mr - 9.8749 % of id - 9.3489 % of indic-pa - 9.4767 % of indic-gu - 24.1132 % of indic-as - 5.3309 % of indic-or ### BigScience processing steps #### Filters applied to: en - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: ar - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: fr - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: es - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: ca - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: zh #### Filters applied to: zh #### Filters applied to: indic-bn - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ta - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-te - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: pt - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-hi - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ml - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ur - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: vi - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-kn - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: eu - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs #### Filters applied to: indic-mr - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: id - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-pa - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-gu - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-as - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs #### Filters applied to: indic-or - filter_wiki_user_titles - dedup_document - 
filter_remove_empty_docs
[ -0.6962971091270447, -0.5988854169845581, 0.35326462984085083, 0.19124259054660797, -0.2220754325389862, -0.09201017767190933, -0.22527462244033813, -0.15435044467449188, 0.6995663046836853, 0.33350563049316406, -0.8147599101066589, -0.9191403985023499, -0.7183430194854736, 0.5040152072906...
null
null
null
null
null
null
null
null
null
null
null
null
null
HuggingFaceM4/yttemporal180m
HuggingFaceM4
2022-05-24T12:25:22Z
12
2
null
[ "license:other", "region:us" ]
2022-05-24T12:25:22Z
2022-05-19T12:25:39.000Z
2022-05-19T12:25:39
--- license: other ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
trashsock/hands-images
trashsock
2022-05-24T07:03:34Z
12
5
null
[ "license:gpl-3.0", "region:us" ]
2022-05-24T07:03:34Z
2022-05-24T06:33:07.000Z
2022-05-24T06:33:07
--- license: gpl-3.0 ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
GEM/squality
GEM
2022-10-25T12:58:23Z
12
1
null
[ "task_categories:summarization", "annotations_creators:crowd-sourced", "language_creators:unknown", "multilinguality:unknown", "size_categories:unknown", "source_datasets:original", "language:en", "license:cc-by-4.0", "arxiv:2205.11465", "arxiv:2112.07637", "arxiv:2104.05938", "region:us" ]
2022-10-25T12:58:23Z
2022-05-29T16:40:50.000Z
2022-05-29T16:40:50
--- annotations_creators: - crowd-sourced language_creators: - unknown language: - en license: - cc-by-4.0 multilinguality: - unknown size_categories: - unknown source_datasets: - original task_categories: - summarization task_ids: [] pretty_name: squality --- # Dataset Card for GEM/squality ## Dataset Description - **Homepage:** https://github.com/nyu-mll/SQuALITY - **Repository:** https://github.com/nyu-mll/SQuALITY/data - **Paper:** https://arxiv.org/abs/2205.11465 - **Leaderboard:** N/A - **Point of Contact:** Alex Wang ### Link to Main Data Card You can find the main data card on the [GEM Website](https://gem-benchmark.com/data_cards/squality). ### Dataset Summary SQuALITY (Summarization-format QUestion Answering with Long Input Texts, Yes!) is a summarization dataset that is: * Abstractive * Long-input: The input document are short stories between 3000--6000 words. * Question-focused: Each story is associated with multiple question-summary pairs. * Multi-reference: Each question is paired with 4 summaries. * High-quality: The summaries are crowdsourced from skilled and trained writers. You can load the dataset via: ``` import datasets data = datasets.load_dataset('GEM/squality') ``` The data loader can be found [here](https://huggingface.co/datasets/GEM/squality). #### website [Github](https://github.com/nyu-mll/SQuALITY) #### paper [ArXiv](https://arxiv.org/abs/2205.11465) #### authors Alex Wang (NYU); Angelica Chen (NYU); Richard Yuanzhe Pang (NYU); Nitish Joshi (NYU); Samuel R. Bowman (NYU) ## Dataset Overview ### Where to find the Data and its Documentation #### Webpage <!-- info: What is the webpage for the dataset (if it exists)? --> <!-- scope: telescope --> [Github](https://github.com/nyu-mll/SQuALITY) #### Download <!-- info: What is the link to where the original dataset is hosted? --> <!-- scope: telescope --> [Github](https://github.com/nyu-mll/SQuALITY/data) #### Paper <!-- info: What is the link to the paper describing the dataset (open access preferred)? --> <!-- scope: telescope --> [ArXiv](https://arxiv.org/abs/2205.11465) #### BibTex <!-- info: Provide the BibTex-formatted reference for the dataset. Please use the correct published version (ACL anthology, etc.) instead of google scholar created Bibtex. --> <!-- scope: microscope --> ``` @article{wang2022squality, title={S{Q}u{ALITY}: Building a Long-Document Summarization Dataset the Hard Way}, author={Wang, Alex and Pang, Richard Yuanzhe and Chen, Angelica and Phang, Jason and Bowman, Samuel R.}, journal={arXiv preprint 2205.11465}, year={2022} } ``` #### Contact Name <!-- quick --> <!-- info: If known, provide the name of at least one person the reader can contact for questions about the dataset. --> <!-- scope: periscope --> Alex Wang #### Contact Email <!-- info: If known, provide the email of at least one person the reader can contact for questions about the dataset. --> <!-- scope: periscope --> wangalexc@gmail.com #### Has a Leaderboard? <!-- info: Does the dataset have an active leaderboard? --> <!-- scope: telescope --> no ### Languages and Intended Use #### Multilingual? <!-- quick --> <!-- info: Is the dataset multilingual? --> <!-- scope: telescope --> no #### Covered Dialects <!-- info: What dialects are covered? Are there multiple dialects per language? --> <!-- scope: periscope --> stories: 1930--1970 American English summaries: modern American English #### Covered Languages <!-- quick --> <!-- info: What languages/dialects are covered in the dataset? 
--> <!-- scope: telescope --> `English` #### Whose Language? <!-- info: Whose language is in the dataset? --> <!-- scope: periscope --> stories: 1930--1970 American science fiction writers (predominantly American men) summaries: Upwork writers (college-educated, native-English) and NYU undergraduates (English-fluent college students) #### License <!-- quick --> <!-- info: What is the license of the dataset? --> <!-- scope: telescope --> cc-by-4.0: Creative Commons Attribution 4.0 International #### Intended Use <!-- info: What is the intended use of the dataset? --> <!-- scope: microscope --> summarization research #### Primary Task <!-- info: What primary task does the dataset support? --> <!-- scope: telescope --> Summarization #### Communicative Goal <!-- quick --> <!-- info: Provide a short description of the communicative goal of a model trained for this task on this dataset. --> <!-- scope: periscope --> Given a question about a particular high-level aspect of a short story, provide a summary about that aspect in the story (e.g., plot, character relationships, setting, theme, etc.). ### Credit #### Curation Organization Type(s) <!-- info: In what kind of organization did the dataset curation happen? --> <!-- scope: telescope --> `academic` #### Curation Organization(s) <!-- info: Name the organization(s). --> <!-- scope: periscope --> New York University #### Dataset Creators <!-- info: Who created the original dataset? List the people involved in collecting the dataset and their affiliation(s). --> <!-- scope: microscope --> Alex Wang (NYU); Angelica Chen (NYU); Richard Yuanzhe Pang (NYU); Nitish Joshi (NYU); Samuel R. Bowman (NYU) #### Funding <!-- info: Who funded the data creation? --> <!-- scope: microscope --> Eric and Wendy Schmidt; Apple; NSF #### Who added the Dataset to GEM? <!-- info: Who contributed to the data card and adding the dataset to GEM? List the people+affiliations involved in creating this data card and who helped integrate this dataset into GEM. --> <!-- scope: microscope --> Alex Wang (NYU) ### Dataset Structure #### Data Fields <!-- info: List and describe the fields present in the dataset. --> <!-- scope: telescope --> * metadata: Project Gutenberg ID, internal UID, Project Gutenberg license * document: the story * questions: a list where each element contains * question text: the question * question number: the order in which workers answered the question * responses: a list where each element contains * worker ID: anonymous * internal UID * response text: the response #### Reason for Structure <!-- info: How was the dataset structure determined? --> <!-- scope: microscope --> The dataset is arranged with responses grouped by question (for ease of multi-reference training and evaluation) and questions grouped by story (to avoid duplicating the story in the dataset) #### Example Instance <!-- info: Provide a JSON formatted example of a typical instance in the dataset. --> <!-- scope: periscope --> ``` {"metadata": {"passage_id": "63833", "uid": "ea0017c487a245668698cf527019b2b6", "license": ""}, "document": "Story omitted for readability", "questions": [{"question_text": "What is the plot of the story?", "question_number": 1, "responses": [{"worker_id": "6", "uid": "0c27bef1b7b644ffba735fdb005f9529", "response_text": "Brevet Lieutenant Commander David Farragut Stryakalski III, AKA Strike, is charged with commanding a run-down and faulty vessel, the Aphrodite. 
Aphrodite was the brain-child of Harlan Hendricks, an engineer who ushered in new technology ten years back. All three of his creations failed spectacularly, resulting in death and a failed career. The Aphrodite was the only ship to survive, and she is now used for hauling mail back and forth between Venus and Mars.\nStrike and Cob, the Aphrodite\u2019s only executive to last more than six months, recount Strike\u2019s great failures and how he ended up here. He used to fly the Ganymede, but was removed after he left his position to rescue colonists who didn\u2019t need rescuing. Strike was no longer trustworthy in Admiral Gorman\u2019s eyes, so he banished him to the Aphrodite. \nThe circuit that caused the initial demise of Aphrodite was sealed off. After meeting some members of his crew, Strike orders a conference for all personnel and calls in an Engineering Officer, one I.V. Hendricks. \nAfter Lieutenant Ivy Hendricks arrives--not I.V.--Strike immediately insults her by degrading the ship\u2019s designer, Harlan Hendricks. As it turns out, Hendricks is his daughter, and she vows to prove him wrong and all those who doubted her father. \nDespite their initial conflict, Strike and Hendricks\u2019 relationship soon evolves from resentment to respect. During this time, Strike\u2019s confidence in the Aphrodite plummets as she suffers from mechanical issues. \nThe Aphrodite starts to heat up as they get closer to the sun. The refrigeration units could not handle the heat, causing discomfort among the crew. As they get closer, a radar contact reveals that two dreadnaughts, the Lachesis and the Atropos, are doing routine patrolling. Nothing to worry about, except the Atropos had Admiral Gorman on board, hated by Strike and Hendricks.\nStrike and Hendricks make a joke about Gorman falling into the sun. As the temperature steadily climbs, the crew members overheat and begin fighting, resulting in a black eye. A distress signal came through from the Lachesis: the Atropos, with Gorman on board, was tumbling into the sun. The Lachesis was attempting to rescue them with an unbreakable cord, but they too were being pulled in. \nHendricks had fixed the surge-circuit rheostat, the one her father designed, and claimed it could help them rescue the ships. After some tension, Strike agrees and they race down to the sun to pick up the drifting dreadnaughts. \nStrike puts Hendricks in charge, but soon the heat overtakes her, and she is unable to continue. Strike takes over, attaches the Aphrodite to the Lachesis with a cord, and turns on the surge-circuit. They blast themselves out of there, rescuing the two ships and Admiral Gorman at the same time. \nCob and Strike are awarded Spatial Cross awards, while Hendricks is promoted to an engineering position at the Bureau of Ships. The story ends with Cob and Strike flipping through the pages of an address book until they land on Canalopolis, Mars. \n"}, {"worker_id": "1", "uid": "04e79312dede4a0da5993101e55a796a", "response_text": "Strike joins the crew of the Aphrodite after he has made several poor decisions while he was the captain of another spaceship. He is essentially being punished by his boss, Gorman, and put somewhere where he can do little harm. His job is to deliver the mail from Venus to Mars, so it\u2019s pretty straightforward. \n\nWhen he meets the Officer of the Deck, Celia Graham, he immediately becomes uncomfortable. He does not like to work with women in space, although it\u2019s a pretty common occurrence. 
He holds a captain\u2019s meeting the first day on the job, and he waits to meet his Engineering Officer, I.V. Hendricks. He makes a rude comment about how the man is late for his first meeting, but actually, the female Ivy has already shown up. \n\nAfter meeting Ivy formally, he makes a comment about how the ship Aphrodite was built by an imbecile. Ivy immediately tells him that he\u2019s wrong, and she knows this because the designer of the ship was none other than her own father. \n\nHis first week as captain on the new ship goes very poorly. Several repairs need to be done to Aphrodite, they run behind schedule, and the new crew members have a tough time getting a handle on Aphrodite\u2019s intricacies. \n\nThe heat index in the ship begins to rise, and the crew members can no longer wear their uniforms without fainting. Suddenly a distress call comes in, and it\u2019s coming from the Atropos, a ship Captained by Gorman, and the Lachesis. The crew members hesitate to take the oldest and most outdated machinery on a rescue trip. Strike has been in trouble for refusing to follow commands before, and he knows it\u2019s a risky move. However, Ivy insists that she knows how to pilot the Aphrodite, and she can save the crew members on the Atropos and the Lachesis from death. They are quickly tumbling towards the sun, and they will perish if someone doesn\u2019t do something quickly. \n\nIvy takes control of the ship, and the heat on the Aphrodite continues to rise steadily. Eventually, she faints from pure heat exhaustion, and she tells Strike that he must take over. He does, and he manages to essentially lasso the other two ships, and with just the right amount of power, he pulls them back into orbit. \n\nAt a bar, after the whole ordeal, Cob pokes fun at Strike for staying on the Aphrodite. He then admits that he actually respects Strike\u2019s loyalty to the ship that saved his reputation. Cob asks about Strike\u2019s relationship with Ivy, but Strike tells him that she has taken her dad\u2019s former job, so she no longer works with him. Strike takes the moment to look up her info, presumably to restart the relationship. \n"}, {"worker_id": "5", "uid": "71efb8636b504f42a6989bb90e360186", "response_text": "The narrative follows commander Strike as he begins his command of the spaceship Aphrodite. Strike comes from a long line of military greats but himself is prone to poor professional decision making.\n\nAs he takes command, the mission is a simple mail run. However, in the course of their journey, they receive word of two ships in dire need of rescue. Strike and his engineering officer, Ivy Hendricks, decide to use the ships extremely risky surge-circuit to aid the ships.\n\nThe rescue is a success and the crew is hailed for its bravery in saving the doomed vessels. "}, {"worker_id": "3", "uid": "8aa46ba8bd2945c98babd7dd2d9ecc38", "response_text": "The story starts in a muddy swamp on Venus, where Strike, a Brevet Lieutenant Commander, is encountering his new ship, the Aphrodite, for the first time. Here on Venusport Base, he is introduced to the executive officer of the ship, a man who goes by Cob. Strike comes from a line of servicemen who were all well respected, but he himself has more of a reputation for causing trouble by saying the wrong things or deviating from mission plans. His reputation preceded him, as Cob had specific questions about some of these events. The Aphrodite was incredibly impressive when it was designed, but did not live up to its expectations. 
It had been refitted, and the new mission that Strike was to lead was a mail run between Venus and Mars. As he entered the ship, Strike began to meet his new crew, including Celia Graham, his Radar Officer. Strike is not used to women being on ships and is decidedly uncomfortable with the idea. As he is briefing the officers who were already present, Strike is surprised when he meets his new engineering officer, Ivy Hendricks. Ivy is the daughter of the man who designed the ship, and she is cold to Strike at first, as he is to her. However, her expertise in engineering generally, the ship specifically, and other skills as well as piloting, meant that Strike warmed up to her as their mission went on. As the ship was flying towards Mars on their route, the crew picked up a distress signal from the Lachesis, which was trying to pull the Atropos away from the gravitational pull of the sun after it was damaged in an equipment malfunction. The Admiral who had put Strike in charge of the Aphrodite was on the Atropos, and Ivy dislikes him even more than Strike does, but they know they have to try to save the crews. Strike is hesitant, but Ivy has a plan and insists that they try. She has spent all of her free time tinkering with the circuits, and takes charge. She turned the Aphrodite towards the ships in danger, and sends out a cable to connect the Aphrodite to those ships. After they are all connected, the ships continue to spin towards the sun, which causes Ivy to pass out, leaving Strike in charge. He manages to pull the ships into line and send the Aphrodite in the right direction before passing out himself. The Aphrodite has the power to pull everyone away from the Sun\u2019s gravity, but the acceleration knocks everyone out on all three ships. In the end, it was a successful rescue mission of multiple crews. Strike and Cob find themselves in an officer\u2019s club at the end of the story, discussing Ivy\u2019s new job, and Strike acknowledges that Cob is right about the Aphrodite having grown on him, and plans to stay its captain."}]}, {"question_text": "Who is Ivy Hendricks and what happens to her throughout the story?", "question_number": 2, "responses": [{"worker_id": "6", "uid": "0c27bef1b7b644ffba735fdb005f9529", "response_text": "Lieutenant Ivy Hendricks is the daughter of Harlan Hendricks, a formerly respected engineer. He created the surge-circuit, an innovation in interstellar astrogation, and he was awarded a Legion of Merit. He designed three famous ships: the Artemis, the Andromeda, and the Aphrodite, the prototype. Despite being hailed as the latest and greatest in technology, all three ships either exploded or failed. \nAccording to Lieutenant Ivy Hendricks, their failures were due to the lack of education on board. She claimed that her father asked for the crew members to be trained in surge-circuit technology, so they could use it properly and correctly. That wish was not granted and after all three ships failed, his reputation and career were doomed. Admiral Gorman pulled the plug on his career and therefore became the target of all Lieutenant Hendricks\u2019 hate. \nWith a bone to pick, Lieutenant Hendricks, a knowledgeable engineer herself, comes aboard the Aphrodite to serve as her engineer and occasional pilot. She wants to prove to the world that her father\u2019s creation was genius and deserving of praise. \nAlthough they started off on the wrong foot, Lieutenant Hendricks and Strike, her commander, develop a friendship and appreciation for each other. 
They bond over their deep hatred of Admiral Gorman and the joy of piloting a ship. She soon proves herself to Strike, and he begins to trust her. Their relationship walks the fine line between friendship and romance. \nAs the Aphrodite is attempting to rescue the fallen dreadnaughts, Lieutenant Hendricks comes up with the solution. Due to her constant tinkering on the ship, she had fixed the surge-circuit rheostat and made it ready to use. Initially, no one trusts her, seeing as the last time it was used people died. But Strike\u2019s trust in her is strong and true, so he approves the use of the surge-circuit. Hendricks pilots the ship, but soon becomes too overheated and comes close to fainting. Strike takes over piloting and eventually activates the surge-circuit. It works and they are able to rescue the two ships, one of which had Admiral Gorman, her sworn enemy, onboard. \nLieutenant Hendricks receives a major promotion; she is now an engineer at the Bureau of Ships. She proved them wrong, and restored her father\u2019s legacy and good name. The story ends with their romance left in the air, but Hendricks has much to be proud of. \n"}, {"worker_id": "1", "uid": "04e79312dede4a0da5993101e55a796a", "response_text": "\nLieutenant Ivy Hendricks is the new Engineering Officer on Aphrodite. Strike and Cob assume that Ivy is a man before she arrives because they are sexist and because her name is listed as I.V. in the orders. Ivy is actually the daughter of the man who designed the award-winning craft.\n\nShe is cold and unfriendly towards Strike after she meets him, and that\u2019s probably because he makes a rude comment about the ship which her father created. After a couple weeks of working together, the two begin to get along very well. Strike admires Ivy\u2019s piloting skills and her depth of knowledge about the Aphrodite. \n\nThe two also bond over their shared hatred of Strike\u2019s former boss, Gorman. Strike feels as though he has ruined his career, and Ivy thinks that Gorman torpedoed her father\u2019s career. Ivy wants nothing more than to prove that Gorman is an idiot. \n\nHowever, when Gorman\u2019s ship is hurtling towards the sun and he and his crew members are about to die, Ivy sees that it\u2019s the perfect opportunity to show Gorman just how wrong he was about the ship her father designed. It\u2019s a very dangerous mission, but Ivy is steadfast in her decision and she\u2019s deeply courageous. She pilots the ship for most of the rescue mission, but eventually faints from the extreme heat. She tells Strike that he needs to take over, and he does a great job. \n\nIvy is then promoted, and she moves to Canalopolis, Mars. She now outranks her former Captain, Strike. \n"}, {"worker_id": "5", "uid": "71efb8636b504f42a6989bb90e360186", "response_text": "Ivy Hendricks is the engineering officer assigned to the Aphrodite. She is the daughter of Harlan Hendricks, the ship's original designer. She is fiercely protective of her father's legacy and resents Admiral Gorman for the way he treated him.\n\nHendricks and Strike form an alliance of sorts after his initial surprise of seeing a woman assigned to this officer's role. When news arrives that two ships are in danger of falling into the sun, Ivy lobbies to use her father's technology to save the ship. Strike agrees to her plan although the risks are high. 
The Aphrodite eventually saves the ships although Ivy faints in the process from the heat and command has to be taken over by Strike.\n\nThe successful mission results in a promotion for Ivy as she works as a designer in the Bureau of Ships like her father."}, {"worker_id": "3", "uid": "8aa46ba8bd2945c98babd7dd2d9ecc38", "response_text": "Ivy Hendricks is the new engineering officer on the Aphrodite, having been transferred from the Antigone. She is a tall woman with dark hair and contrasting pale blue eyes, who has a very wide range of experience in ship operations and engineering. Her father, Harlan Hendricks, was the man who designed the Aphrodite, so she knows the ship needs a lot of specific training. At first, the captain did not expect her to be a woman, and managed to imply that many people found her father incompetent. Although she seemed cold at first, as she reacted to the situation, she and the captain eventually got along fairly well, as he learned to appreciate her wide skill set that ranged from engineering to piloting. Ivy and Strike also had a common enemy in the higher ranks: Space Admiral Gorman. Once Strike trusted her, he appreciated that Ivy spent a lot of spare time working on the old circuits, so she knew the ship like the back of her hand. When the Aphrodite found the Lachesis and the Atropos while following up on a distress signal, Ivy knew the ship well enough to be able to formulate a plan to save everyone. She piloted the Aphrodite carefully, using cables shot with a rocket to connect the three ships together, but the spinning of the ships and the heat inside meant that she passed out and had to leave Strike to take over for her. Her plan was successful; she was promoted, and instead of returning to the Aphrodite she started a design job with the Bureau of Ships."}]}, {"question_text": "What is the relationship between Strike and Aphrodite?", "question_number": 3, "responses": [{"worker_id": "6", "uid": "0c27bef1b7b644ffba735fdb005f9529", "response_text": "Strike is a member of a famous, well-behaved, and well-trained service family. His father and grandfather served in World War II and the Atomic War, respectively. Both earned medals for their heroic service. Strike, however, did not follow in his family\u2019s footsteps. \n\tWith a tendency to say the wrong thing at the wrong time, Strike often offended those around him and garnered a negative reputation. After being put in charge of the Ganymede, he soon lost his position after abandoning his station to rescue colonists who were not in danger. As well, he accused a Martian Ambassador of being a spy at a respectable ball. Admiral Gorman soon demoted him, and he became the commander of the Aphrodite. \n\tAt first, Strike was not a fan. He sees her as ugly, fat, and cantankerous. He misses the Ganymede, a shiny and new rocketship, and views the Aphrodite as less-than. \n\tWithin the first week of flying her, the Aphrodite had a burned steering tube, which made it necessary to go into free-fall as the damage control party made repairs. Strike\u2019s faith in Lover-Girl continued to plummet. \n\tHowever, after Lieutenant Hendricks, the resident engineer, got her hands on the Aphrodite, Strike\u2019s opinion started to change. Her knowledge of the ship, engineering, and piloting helped him gain confidence in both her abilities and those of Aphrodite.\nNear the end of the story, the Aphrodite is tasked with rescuing two ships that are falling into the sun. 
Previously, Lieutenant Hendricks had fixed up the surge-circuit rheostat, and so she offered it up as the only solution. Strike agrees to try it, which shows his faith and trust in the Aphrodite. Luckily, all things go to plan, and the Aphrodite, with Strike piloting, is able to save the two ships and Admiral Gorman. \nAfter Strike won a medal himself, finally following in the family footsteps, he is offered his old position back on the Ganymede. He refuses, and instead returns to old Lover-Girl. He has grown fond of her over the course of their adventure, and they develop a partnership. "}, {"worker_id": "1", "uid": "04e79312dede4a0da5993101e55a796a", "response_text": "Strike is completely unimpressed by the rocket ship Aphrodite. He comments that she looks like a pregnant carp, and he knows that he\u2019s been assigned captain of the ship because he messed up terribly on his other missions. \n\nAphrodite was built 10 years ago, and now she is completely outdated and a laughing stock compared to the other spaceships in the fleet. She was designed by Harlan Hendricks, and the engineer received a Legion of Merit award for her design. \n\nStrike\u2019s mission is to fly Aphrodite to take the mail from Venusport to Canalopolis, Mars. It\u2019s boring and straightforward.\n\nWhen a disaster occurs and two other ships, the Atropos and the Lachesis, are in serious danger of getting too close to the sun, Strike agrees to take the old girl on a rescue mission. He is convinced by Ivy, since she knows the ship better than anyone else and she believes in her. \n\nAlthough Ivy takes Aphrodite most of the way there, it\u2019s Strike who finishes the mission and saves his former boss, Gorman, and many other people from certain death. Aphrodite is the entire reason that Strike is able to mend his terrible reputation and he wins back respect from Gorman. Although they got off to a rocky start, Strike finds it impossible to leave his best girl, even when he is offered a job on another ship. He is loyal to the ship that made him a hero. \n"}, {"worker_id": "5", "uid": "71efb8636b504f42a6989bb90e360186", "response_text": "Strike is assigned to be commander of the spaceship Aphrodite. The ship is assigned as a mail carrier for the inner part of the solar system. The Aphrodite is a dilapidated design with an awful reputation. Strike ended up with the Aphrodite as a result of a series of poor professional decisions that resulted in him getting command of the more prestigious ship Ganymede taken away from him.\n\nHis initial impression of the Aphrodite softens to a grudging respect after the successful mission to save the Atropos and Lachesis. Although he presumably is in line to command the Ganymede again, another faux pas results in Strike continuing to command the Aphrodite. "}, {"worker_id": "3", "uid": "8aa46ba8bd2945c98babd7dd2d9ecc38", "response_text": "At the beginning of the story, Strike is very reluctant to accept Aphrodite, because being in charge of the ship means a demotion for him. His perception of the ship at the beginning of the story is colored by this history, and his first impression of the ship is not a positive one, even from the outside. Besides the actual construction of the ship, the technology that ran it was not something he showed much faith in. The first week that he was in charge after leaving Venus, it seemed things were going drastically wrong. 
When one important piece of equipment burnt out, the ship went into freefall, requiring a lot of repair work from the engineers, and anyone in charge of navigation was handed more work because of this as well. The ship was really put to the test when the Aphrodite responded to the distress call from the Lachesis, whose crew was trying to keep the Atropos from falling into the sun. Because Ivy knew the Aphrodite so well, and had been working on the circuits, it turned out the Aphrodite was the perfect ship to save the day. She could not see the rescue all the way through to the end, because she passed out early, but Strike was conscious a little bit longer and took over until he also passed out. After this unexpected rescue mission, Cob, the Executive Officer, noted that Strike has a newfound appreciation for the ship, and has no intention of leaving. Strike is dedicated to his new mission, even though at the beginning of the story he wanted nothing more than to pilot something the same rank as his old ship."}]}, {"question_text": "Describe the setting of the story.", "question_number": 4, "responses": [{"worker_id": "6", "uid": "0c27bef1b7b644ffba735fdb005f9529", "response_text": "Jinx Ship to the Rescue by Alfred Coppel, Jr. takes place in space, but more specifically in the Aphrodite. \n\tIt starts in the muddy Venusport Base on Venus. Venusport is famous for its warm, slimy, and green rain that falls for 480 hours of every day. A fog rolls in and degrades visibility. \n\tDespite starting on Venusport Base, the characters actually spend most of their time onboard the Aphrodite, a Tellurian Rocket Ship. The Aphrodite had a surge-circuit monitor of twenty guns built into her frame. She was bulky, fat, and ugly, and occasionally had some technical and mechanical struggles as well. \n\tAlthough her frame may not be appealing, she soon becomes victorious as she gains the trust of Strike and other members of his crew and saves two fallen dreadnaughts. With her surge-circuit rheostat rebuilt, the Aphrodite is finally able to accomplish what she was always meant to. "}, {"worker_id": "1", "uid": "04e79312dede4a0da5993101e55a796a", "response_text": "The story starts on the planet of Venus. Venus has days that are 720 hours long, and rain is common. The rain is hot, slimy, and green, and it makes the already wet swamplands even more mushy. Fog is common on Venus.\n\nThe middle of the story takes place on the old and outdated ship, Aphrodite. She gives the crew members a lot of trouble on their first mission. She is in dire need of repairs, she\u2019s slow, and it\u2019s impossible to control her temperature. The crew members are unable to wear their uniforms because the temperature is over 100 degrees. \n\nAphrodite\u2019s mission is simple. She needs to take the mail from Venus to Mars, and it\u2019s the only thing she can be trusted to do successfully. So it\u2019s very impressive when she ends up being the hero of the day and manages to rescue two other ships that are headed towards the sun. \n"}, {"worker_id": "5", "uid": "71efb8636b504f42a6989bb90e360186", "response_text": "The narrative is set in the early 21st century primarily aboard the spaceship Aphrodite. The ship's mission is to deliver mail in the inner part of the solar system.\n\nThe ship's route takes them around the sun and as a result the ambient temperature inside the ship begins to rise to intolerable levels due to proximity to the sun. Because of the heat, the coed crew is allowed to operate with very little clothing. 
Aphrodite is a ship of an outdated design that gives it a lack of comfort and subjects it to numerous small problems that make its operation frustrating."}, {"worker_id": "3", "uid": "8aa46ba8bd2945c98babd7dd2d9ecc38", "response_text": "The story starts at a spaceport on Venus, where it has been raining for hundreds of hours straight. The rain has stopped by the time the story starts, but it has left a lot of mud in the swampy marshes. It was nearing the end of the day, and the fog was enveloping the surroundings as it grew darker outside. It was hot and sticky at Venusport Base, but after Strike left the service on his mission in the Aphrodite, it would only grow hotter on board. The ship itself, where most of the story takes place, is an older, refitted, bulky type of ship. There were only two others like it, and their designer had been awarded a Legion of Merit for the three. However, this is the only one still in use, as the others were destroyed in a much earlier mission. Strike\u2019s disappointment in the ship seems to mirror the sentiment. Inside the ship, there are many systems of pipes connected to the control panels, and the captain had to navigate carefully so that he didn\u2019t hit his head on the bulkhead. While in space, as the ship flew closer and closer to the sun, the interior of the ship grew hotter and hotter. The crew opted to wear as little clothing as possible in an attempt to handle the heat. When the Aphrodite received the distress call from the Lachesis, the ships were close enough to the sun to be affected by its gravitational pull. After the close call near the sun, once everyone regained consciousness, the story ends at an officer\u2019s club on Mars. It was a formal environment, and the Aphrodite\u2019s captain and executive officer planned the rest of their route from there."}]}, {"question_text": "Who is Strike and what happens to him throughout the story?", "question_number": 5, "responses": [{"worker_id": "6", "uid": "0c27bef1b7b644ffba735fdb005f9529", "response_text": "Strike is a member of an esteemed service family on Venus; seven generations of well-behaved and well-trained operators. Unfortunately, Strike struggles to carry on the family tradition, and is known for misspeaking and offending those around him. By trusting his gut, he wound up failing his higher-ups and crew several times. All this culminated in an eventual mistrust of Strike, which led to him being charged with the Aphrodite. \n\tHis deep hatred of Space Admiral Gorman is passionate, but not without reason. Gorman is the one who demoted him to the Aphrodite. At the start, Strike is checking out his new vessel and notes how ugly the ship is. After examining the ship and its crew, it is revealed that Strike is uncomfortable around women and believes they don\u2019t belong on a spaceship. \n\tIn order to start flying, he calls in an expert engineer to come aboard and travel with them. Thinking I.V. Hendricks is a man, he is excited to have them onboard. But when Ivy Hendricks shows up, a female engineer and the daughter of the Aphrodite\u2019s creator, his world is soon turned upside down. \n\tHis initial negative reaction to her is soon displaced by begrudging appreciation and eventually trust and friendship. Hendricks proves his previous theories about women wrong, and Strike is forced to accept that perhaps women do belong on a spaceship. She especially impresses him with her total knowledge of spaceship engineering and the Aphrodite in general. 
And it helped that she hated Admiral Gorman just as much as Strike, if not more. \n\tWhile flying by the sun to deliver mail, the Aphrodite receives a distress call from two ships: the Lachesis and the Atropos, the latter of which carried Admiral Gorman onboard. After the Aphrodite reached orbit, the Lachesis reached out and reported the Atropos was falling into the sun, due to a burst chamber. They couldn\u2019t move those onboard over thanks to all the radiation, so the Lachesis was attempting to pull the Atropos back using an unbreakable cord. But it wasn\u2019t enough. \n\tSince Ivy Hendricks had fixed the surge-circuit rheostat--the feature that crashed the original Aphrodite--, they were able to save the Lachesis and the Atropos and regain some of their dignity and former glory. \n\tStrike is awarded the Spatial Cross, as well as Cob, his friend and longtime executive of the Aphrodite. Strike was asked to return to the Ganymede, a beautiful sleek ship, but allegedly said the wrong thing to Gorman, and was instead sent back to the Aphrodite. Cob believes he did it on purpose, as Strike had grown quite fond of Lover-Girl. \n\tIvy has gone to the Bureau of Ships to engineer vessels, a great upgrade from her previous job. Cob pressures Strike to reach out to her, but he refuses. However, it ends on a hopeful note, with the potential for romance between Strike and Hendricks, and even more adventures on the clunky Aphrodite. "}, {"worker_id": "1", "uid": "04e79312dede4a0da5993101e55a796a", "response_text": "Strike\u2019s real name is Brevet Lieutenant Commander David Farragut Strykalski III. After serving on the Ganymede, he is put in charge of the Aphrodite. He comes from many generations of officers. However, he doesn\u2019t feel like he fits the mold of his grandfather and great-grandfather and so on. His boss, Gorman, disagreed with several decisions he made in the past and sent him to work on the Aphrodite, the unimpressive spaceship.\n\nStrike does not like working with women in space, so he is disappointed when two of his crew members are powerful and successful females. He learns his lesson after working with Ivy Hendricks for a few weeks. She impresses him with her piloting skills and her knowledge of the ship that her father designed. \n\nStrike is skeptical at first when Ivy wants to take Aphrodite to rescue two ships whose crew members are in grave danger. He knows that the mistakes he made before got him on the Aphrodite, and there\u2019s a big chance that he\u2019ll be fired for trying to save the day, or worse, the mission could end in death for him and all of his crew members. He has feelings for Ivy, and her intense passion convinces him that she\u2019s right, Aphrodite can handle the mission and they can save those peoples\u2019 lives.\n\nIvy pilots the ship almost the entire route, but she is unable to finish the job when she passes out from the intense heat. Captain Strike takes over and saves the crews on the Atropos and the Lachesis. He is hailed as a hero, and he repairs his terrible reputation with the selfless act. He decides not to leave the Aphrodite. He wants to be loyal to the ship that worked so hard for him. He does decide to give Ivy a call. Even though she outranks him, he has to admit that he has a crush on her. "}, {"worker_id": "5", "uid": "71efb8636b504f42a6989bb90e360186", "response_text": "Strike is the commander of the Aphrodite. He was originally the commander of the prestigious Ganymede. 
However, a number of decisions made out of bravado as well as some unprofessional comments lost him that command.\n\nNow in command of a dilapidated ship, Strike comes to terms with his job. He commands a crew including a large number of women, which makes him somewhat uncomfortable. His engineering officer Ivy Hendricks in particular seems to be of romantic interest to Strike.\n\nStrike ends up teaming with Ivy to save two ships from falling into the sun earning him a small promotion but an ill-advised comment prevents him from leaving the Aphrodite, perhaps to the satisfaction of Strike himself."}, {"worker_id": "3", "uid": "8aa46ba8bd2945c98babd7dd2d9ecc38", "response_text": "Strike is a highly decorated lieutenant commander in the Navy, who comes from a long line of ship operators. Although he has run many successful missions, he has a reputation of causing trouble\u2014his new Executive Officer, Cob, has heard a number of stories that he asks Strike for details about. Strike has lost command of the ship that he had been captaining, and is sent by Admiral Gorman to captain a mail route on the Aphrodite. He is extremely hesitant to have any positive feelings about the experience, from the ship itself, to the inclusion of women on its crew. Not only is this not the type of ship he is used to, he has never served with women on board. He has to navigate adapting to the new situation while adapting to the new job. Through the first week of his assignment, the ship and its crew grow on him. He comes to trust Ivy Hendricks, the Engineering Officer, and he lets her take charge to try to save the other ships when they respond to a distress call. Eventually, she passes out, and has to leave Strike in charge of getting the ships to safety. Eventually, Strike passes out just like everyone else, from the ship\u2019s acceleration to break the sun\u2019s gravity. At the end of the story, it is clear that his increased appreciation for the ship means he plans on staying, to the delight of his Executive Officer. Cob alludes to Strike having feelings for Ivy, but he says that although she is nice, he has no interest in being with a woman with a higher ranked title than he has. "}]}]} ``` #### Data Splits <!-- info: Describe and name the splits in the dataset if there are more than one. --> <!-- scope: periscope --> train, dev, test #### Splitting Criteria <!-- info: Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g., if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here. --> <!-- scope: microscope --> Stories that appear in both SQuALITY and [QuALITY](https://github.com/nyu-mll/quality) are assigned to the same split in both datasets. ## Dataset in GEM ### Rationale for Inclusion in GEM #### Why is the Dataset in GEM? <!-- info: What does this dataset contribute toward better generation evaluation and why is it part of GEM? --> <!-- scope: microscope --> The summaries in the dataset were crowdsourced, allowing us to use input documents that are easily understood by crowdworkers (as opposed to technical domains, such as scientific papers). Additionally, there is no lede bias in stories, as is typical of news articles used in benchmark summarization datasets like CNN/DM and XSum. Additionally, the dataset is multi-reference and the references for each task are highly diverse. 
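A minimal loading sketch for the splits above, assuming the Hugging Face `datasets` library and that the Hub repository id for this card is `GEM/squality`; the split name `validation` is assumed to correspond to the `dev` split named above, and the field names are taken from the Data Instances example rather than re-verified against the live repository:

```python
from datasets import load_dataset

# Load the dataset from the Hub (repository id assumed to match this card).
squality = load_dataset("GEM/squality")

example = squality["train"][0]
print(sorted(example.keys()))

# Per the Data Instances example above, each story carries a list of
# questions, and each question carries several crowdworker responses whose
# text sits under `response_text`.
```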
Having a diverse set of references better represents the set of acceptable summaries for an input, and opens the door for creative evaluation methodologies using these multiple references. #### Similar Datasets <!-- info: Do other datasets for the high level task exist? --> <!-- scope: telescope --> yes #### Unique Language Coverage <!-- info: Does this dataset cover other languages than other datasets for the same task? --> <!-- scope: periscope --> no #### Difference from other GEM datasets <!-- info: What else sets this dataset apart from other similar datasets in GEM? --> <!-- scope: microscope --> The inputs (story-question pairs) are multi-reference. The questions are high-level and are written to draw from multiple parts of the story, instead of a single section of the story. ### GEM-Specific Curation #### Modified for GEM? <!-- info: Has the GEM version of the dataset been modified in any way (data, processing, splits) from the original curated data? --> <!-- scope: telescope --> no #### Additional Splits? <!-- info: Does GEM provide additional splits to the dataset? --> <!-- scope: telescope --> no ### Getting Started with the Task #### Pointers to Resources <!-- info: Getting started with in-depth research on the task. Add relevant pointers to resources that researchers can consult when they want to get started digging deeper into the task. --> <!-- scope: microscope --> * [original paper](https://arxiv.org/abs/2205.11465) * [modeling question-focused summarization](https://arxiv.org/abs/2112.07637) * [similar task format but different domain](https://arxiv.org/abs/2104.05938) ## Previous Results ### Previous Results #### Metrics <!-- info: What metrics are typically used for this task? --> <!-- scope: periscope --> `ROUGE`, `BERT-Score` #### Proposed Evaluation <!-- info: List and describe the purpose of the metrics and evaluation methodology (including human evaluation) that the dataset creators used when introducing this task. --> <!-- scope: microscope --> Following norms in summarization, we have evaluated with automatic evaluation metrics like ROUGE and BERTScore, but these metrics do not correlate with human judgments of summary quality when comparing model summaries (see paper for details). We highly recommend users of the benchmark use human evaluation as the primary method for evaluating systems. We present one example of such in the paper in which we ask Upwork workers to read the short story and then rate sets of three responses to each question. While this is close to the gold standard in how we would want to evaluate systems on this task, we recognize that finding workers who will read the whole story (~30m) is difficult and expensive, and doing efficient human evaluation for long document tasks is an open problem. #### Previous results available? <!-- info: Are previous results available? --> <!-- scope: telescope --> yes #### Other Evaluation Approaches <!-- info: What evaluation approaches have others used? --> <!-- scope: periscope --> Human evaluation #### Relevant Previous Results <!-- info: What are the most relevant previous results for this task/dataset? --> <!-- scope: microscope --> See paper (https://arxiv.org/abs/2205.11465) ## Dataset Curation ### Original Curation #### Sourced from Different Sources <!-- info: Is the dataset aggregated from different data sources? --> <!-- scope: telescope --> no ### Language Data #### How was Language Data Obtained? <!-- info: How was the language data obtained? 
--> <!-- scope: telescope --> `Crowdsourced` #### Where was it crowdsourced? <!-- info: If crowdsourced, where from? --> <!-- scope: periscope --> `Other crowdworker platform` #### Language Producers <!-- info: What further information do we have on the language producers? --> <!-- scope: microscope --> Upwork: US-born, native English speakers with backgrounds in the humanities and copywriting NYU undergraduates: English-fluent undergraduates from a diverse set of nationalities and majors #### Topics Covered <!-- info: Does the language in the dataset focus on specific topics? How would you describe them? --> <!-- scope: periscope --> The short stories are primarily science fiction and from the 1930s -- 1970s. #### Data Validation <!-- info: Was the text validated by a different worker or a data curator? --> <!-- scope: telescope --> validated by crowdworker #### Was Data Filtered? <!-- info: Were text instances selected or filtered? --> <!-- scope: telescope --> not filtered ### Structured Annotations #### Additional Annotations? <!-- quick --> <!-- info: Does the dataset have additional annotations for each instance? --> <!-- scope: telescope --> crowd-sourced #### Number of Raters <!-- info: What is the number of raters --> <!-- scope: telescope --> 11<n<50 #### Rater Qualifications <!-- info: Describe the qualifications required of an annotator. --> <!-- scope: periscope --> English-fluent, with experience reading and writing about literature #### Raters per Training Example <!-- info: How many annotators saw each training example? --> <!-- scope: periscope --> 4 #### Raters per Test Example <!-- info: How many annotators saw each test example? --> <!-- scope: periscope --> 4 #### Annotation Service? <!-- info: Was an annotation service used? --> <!-- scope: telescope --> no #### Any Quality Control? <!-- info: Quality control measures? --> <!-- scope: telescope --> validated by another rater #### Quality Control Details <!-- info: Describe the quality control measures that were taken. --> <!-- scope: microscope --> Each response was reviewed by three reviewers, who ranked the response (against two other responses), highlighted errors in the response, and provided feedback to the original response writer. ### Consent #### Any Consent Policy? <!-- info: Was there a consent policy involved when gathering the data? --> <!-- scope: telescope --> yes #### Consent Policy Details <!-- info: What was the consent policy? --> <!-- scope: microscope --> Writers were informed that their writing and reviewing would be used in the development of AI. ### Private Identifying Information (PII) #### Contains PII? <!-- quick --> <!-- info: Does the source language data likely contain Personal Identifying Information about the data creators or subjects? --> <!-- scope: telescope --> unlikely #### Any PII Identification? <!-- info: Did the curators use any automatic/manual method to identify PII in the dataset? --> <!-- scope: periscope --> no identification ### Maintenance #### Any Maintenance Plan? <!-- info: Does the original dataset have a maintenance plan? --> <!-- scope: telescope --> no ## Broader Social Context ### Previous Work on the Social Impact of the Dataset #### Usage of Models based on the Data <!-- info: Are you aware of cases where models trained on the task featured in this dataset or related tasks have been used in automated systems? --> <!-- scope: telescope --> no ### Impact on Under-Served Communities #### Addresses needs of underserved Communities? 
<!-- info: Does this dataset address the needs of communities that are traditionally underserved in language technology, and particularly language generation technology? Communities may be underserved for example because their language, language variety, or social or geographical context is underrepresented in NLP and NLG resources (datasets and models). --> <!-- scope: telescope --> no ### Discussion of Biases #### Any Documented Social Biases? <!-- info: Are there documented social biases in the dataset? Biases in this context are variations in the ways members of different social categories are represented that can have harmful downstream consequences for members of the more disadvantaged group. --> <!-- scope: telescope --> yes ## Considerations for Using the Data ### PII Risks and Liability ### Licenses #### Copyright Restrictions on the Dataset <!-- info: Based on your answers in the Intended Use part of the Data Overview Section, which of the following best describe the copyright and licensing status of the dataset? --> <!-- scope: periscope --> `open license - commercial use allowed` #### Copyright Restrictions on the Language Data <!-- info: Based on your answers in the Language part of the Data Curation Section, which of the following best describe the copyright and licensing status of the underlying language data? --> <!-- scope: periscope --> `public domain` ### Known Technical Limitations #### Unsuited Applications <!-- info: When using a model trained on this dataset in a setting where users or the public may interact with its predictions, what are some pitfalls to look out for? In particular, describe some applications of the general task featured in this dataset that its curation or properties make it less suitable for. --> <!-- scope: microscope --> The stories in the dataset are from the 1930--1970s and may contain harmful stances on topics like race and gender. Models trained on the stories may reproduce these stances in their outputs. #### Discouraged Use Cases <!-- info: What are some discouraged use cases of a model trained to maximize the proposed metrics on this dataset? In particular, think about settings where decisions made by a model that performs reasonably well on the metric may still have strong negative consequences for users or members of the public. --> <!-- scope: microscope --> The proposed automatic metrics for this dataset (ROUGE, BERTScore) are not sensitive to factual errors in summaries, and have been shown to not correlate well with human judgments of summary quality along a number of axes.
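Given the multi-reference discussion above, one way to use the diverse reference sets at evaluation time is to score a summary against every reference and keep the best match. A minimal sketch, assuming the `rouge-score` PyPI package; the helper name and toy strings are illustrative only, and the approach still inherits the insensitivity of ROUGE to factual errors noted above:

```python
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)

def multi_ref_rouge(prediction, references):
    # Score the prediction against each crowdworker reference and keep the
    # best match -- a simple way to use a multi-reference set with a
    # single-reference metric.
    return max(scorer.score(ref, prediction)["rougeL"].fmeasure
               for ref in references)

# Toy values; real references are the `response_text` strings for one question.
references = ["Strike and Ivy rescue two ships falling toward the sun.",
              "Ivy pilots the Aphrodite to save the Atropos and Lachesis."]
prediction = "Strike and Ivy use the Aphrodite to save two ships from the sun."
print(multi_ref_rouge(prediction, references))
```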
[ -0.31263598799705505, -0.7266704440116882, 0.4686817526817322, -0.08332791924476624, -0.13940180838108063, -0.1288800835609436, -0.11753204464912415, -0.3877411484718323, 0.567328929901123, 0.3662300407886505, -0.6987625360488892, -0.6503143310546875, -0.36112600564956665, 0.14256256818771...
null
null
null
null
null
null
null
null
null
null
null
null
null
aaraki/github-issues8
aaraki
2022-06-01T02:09:17Z
12
0
null
[ "region:us" ]
2022-06-01T02:09:17Z
2022-05-30T07:48:17.000Z
2022-05-30T07:48:17
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
EddieChen372/tokenized-512-react
EddieChen372
2022-06-19T05:35:57Z
12
0
null
[ "region:us" ]
2022-06-19T05:35:57Z
2022-06-10T07:26:57.000Z
2022-06-10T07:26:57
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
rajistics/auditor_review
rajistics
2022-07-19T21:48:59Z
12
1
null
[ "task_categories:text-classification", "task_ids:multi-class-classification", "task_ids:sentiment-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc-b...
2022-07-19T21:48:59Z
2022-06-13T21:49:54.000Z
2022-06-13T21:49:54
--- annotations_creators: - expert-generated language_creators: - found language: - en license: - cc-by-nc-sa-3.0 multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - multi-class-classification - sentiment-classification paperswithcode_id: null pretty_name: Auditor_Review --- # Dataset Card for Auditor_Review ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description Auditor review data collected by News Department - **Point of Contact:** Talked to COE for Auditing ### Dataset Summary Auditor sentiment dataset of sentences from financial news. The dataset consists of *** sentences from English language financial news categorized by sentiment. The dataset is divided by agreement rate of 5-8 annotators. ### Supported Tasks and Leaderboards Sentiment Classification ### Languages English ## Dataset Structure ### Data Instances ``` { "sentence": "Pharmaceuticals group Orion Corp reported a fall in its third-quarter earnings that were hit by larger expenditures on R&D and marketing .", "label": "negative" } ``` ### Data Fields - sentence: a tokenized line from the dataset - label: a label corresponding to the class as a string: 'positive', 'negative' or 'neutral' ### Data Splits A train/test split was created randomly with a 75/25 ratio (a loading sketch appears at the end of this card) ## Dataset Creation ### Curation Rationale The key arguments for the low utilization of statistical techniques in financial sentiment analysis have been the difficulty of implementation for practical applications and the lack of high quality training data for building such models. *** ### Source Data #### Initial Data Collection and Normalization The corpus used in this paper is made out of English news on all listed companies in **** #### Who are the source language producers? The source data was written by various auditors ### Annotations #### Annotation process This release of the financial phrase bank covers a collection of 4840 sentences. The selected collection of phrases was annotated by 16 people with adequate background knowledge on financial markets. Given the large number of overlapping annotations (5 to 8 annotations per sentence), there are several ways to define a majority vote based gold standard. 
To provide an objective comparison, we have formed 4 alternative reference datasets based on the strength of majority agreement: ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases All annotators were from the same institution and so interannotator agreement should be understood with this taken into account. ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information License: Creative Commons Attribution 4.0 International License (CC-BY) ### Contributions
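A minimal sketch of loading this dataset and inspecting the fields described above, assuming the Hugging Face `datasets` library can read this repository directly and that the 75/25 split is exposed as `train`/`test`:

```python
from datasets import load_dataset

ds = load_dataset("rajistics/auditor_review")

# One record, matching the Data Instances example; note that `label` may be
# stored as a class index rather than the literal string shown above.
print(ds["train"][0])
print(ds["train"].features)
```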
[ -0.3504559099674225, -0.5390042662620544, -0.02797626703977585, 0.3927314579486847, -0.41674500703811646, 0.13806398212909698, -0.10673584043979645, -0.20886527001857758, 0.392019122838974, 0.7711713910102844, -0.5793429613113403, -0.920759379863739, -0.6439344882965088, 0.2632558643817901...
null
null
null
null
null
null
null
null
null
null
null
null
null
loulely/glue_cola_processed
loulely
2022-06-17T08:35:07Z
12
0
null
[ "region:us" ]
2022-06-17T08:35:07Z
2022-06-17T08:35:00.000Z
2022-06-17T08:35:00
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
BeIR/scidocs-generated-queries
BeIR
2022-10-23T06:12:52Z
12
2
beir
[ "task_categories:text-retrieval", "task_ids:entity-linking-retrieval", "task_ids:fact-checking-retrieval", "multilinguality:monolingual", "language:en", "license:cc-by-sa-4.0", "region:us" ]
2022-10-23T06:12:52Z
2022-06-17T12:53:49.000Z
2022-06-17T12:53:49
--- annotations_creators: [] language_creators: [] language: - en license: - cc-by-sa-4.0 multilinguality: - monolingual paperswithcode_id: beir pretty_name: BEIR Benchmark size_categories: msmarco: - 1M<n<10M trec-covid: - 100k<n<1M nfcorpus: - 1K<n<10K nq: - 1M<n<10M hotpotqa: - 1M<n<10M fiqa: - 10K<n<100K arguana: - 1K<n<10K touche-2020: - 100K<n<1M cqadupstack: - 100K<n<1M quora: - 100K<n<1M dbpedia: - 1M<n<10M scidocs: - 10K<n<100K fever: - 1M<n<10M climate-fever: - 1M<n<10M scifact: - 1K<n<10K source_datasets: [] task_categories: - text-retrieval - zero-shot-retrieval - information-retrieval - zero-shot-information-retrieval task_ids: - passage-retrieval - entity-linking-retrieval - fact-checking-retrieval - tweet-retrieval - citation-prediction-retrieval - duplication-question-retrieval - argument-retrieval - news-retrieval - biomedical-information-retrieval - question-answering-retrieval --- # Dataset Card for BEIR Benchmark ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://github.com/UKPLab/beir - **Repository:** https://github.com/UKPLab/beir - **Paper:** https://openreview.net/forum?id=wCu6T5xFjeJ - **Leaderboard:** https://docs.google.com/spreadsheets/d/1L8aACyPaXrL8iEelJLGqlMqXKPX2oSP_R10pZoy77Ns - **Point of Contact:** nandan.thakur@uwaterloo.ca ### Dataset Summary BEIR is a heterogeneous benchmark that has been built from 18 diverse datasets representing 9 information retrieval tasks: - Fact-checking: [FEVER](http://fever.ai), [Climate-FEVER](http://climatefever.ai), [SciFact](https://github.com/allenai/scifact) - Question-Answering: [NQ](https://ai.google.com/research/NaturalQuestions), [HotpotQA](https://hotpotqa.github.io), [FiQA-2018](https://sites.google.com/view/fiqa/) - Bio-Medical IR: [TREC-COVID](https://ir.nist.gov/covidSubmit/index.html), [BioASQ](http://bioasq.org), [NFCorpus](https://www.cl.uni-heidelberg.de/statnlpgroup/nfcorpus/) - News Retrieval: [TREC-NEWS](https://trec.nist.gov/data/news2019.html), [Robust04](https://trec.nist.gov/data/robust/04.guidelines.html) - Argument Retrieval: [Touche-2020](https://webis.de/events/touche-20/shared-task-1.html), [ArguAna](http://argumentation.bplaced.net/arguana/data) - Duplicate Question Retrieval: [Quora](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs), [CqaDupstack](http://nlp.cis.unimelb.edu.au/resources/cqadupstack/) - Citation-Prediction: [SCIDOCS](https://allenai.org/data/scidocs) - Tweet Retrieval: [Signal-1M](https://research.signal-ai.com/datasets/signal1m-tweetir.html) - Entity 
Retrieval: [DBPedia](https://github.com/iai-group/DBpedia-Entity/) All these datasets have been preprocessed and can be used for your experiments (a loading sketch appears at the end of this card). ### Supported Tasks and Leaderboards The dataset supports a leaderboard that evaluates models against task-specific metrics such as F1 or EM, as well as their ability to retrieve supporting information from Wikipedia. The current best performing models can be found [here](https://eval.ai/web/challenges/challenge-page/689/leaderboard/). ### Languages All tasks are in English (`en`). ## Dataset Structure All BEIR datasets must contain a corpus, queries and qrels (relevance judgments file). They must be in the following format: - `corpus` file: a `.jsonl` file (jsonlines) that contains a list of dictionaries, each with three fields `_id` with unique document identifier, `title` with document title (optional) and `text` with document paragraph or passage. For example: `{"_id": "doc1", "title": "Albert Einstein", "text": "Albert Einstein was a German-born...."}` - `queries` file: a `.jsonl` file (jsonlines) that contains a list of dictionaries, each with two fields `_id` with unique query identifier and `text` with query text. For example: `{"_id": "q1", "text": "Who developed the mass-energy equivalence formula?"}` - `qrels` file: a `.tsv` file (tab-separated) that contains three columns, i.e. the `query-id`, `corpus-id` and `score` in this order. Keep 1st row as header. For example: `q1 doc1 1` ### Data Instances A high level example of any beir dataset: ```python corpus = { "doc1" : { "title": "Albert Einstein", "text": "Albert Einstein was a German-born theoretical physicist who developed the theory of relativity, \ one of the two pillars of modern physics (alongside quantum mechanics). His work is also known for \ its influence on the philosophy of science. He is best known to the general public for his mass–energy \ equivalence formula E = mc2, which has been dubbed 'the world's most famous equation'. He received the 1921 \ Nobel Prize in Physics 'for his services to theoretical physics, and especially for his discovery of the law \ of the photoelectric effect', a pivotal step in the development of quantum theory." }, "doc2" : { "title": "", # Keep title an empty string if not present "text": "Wheat beer is a top-fermented beer which is brewed with a large proportion of wheat relative to the amount of \ malted barley. The two main varieties are German Weißbier and Belgian witbier; other types include Lambic (made\ with wild yeast), Berliner Weisse (a cloudy, sour beer), and Gose (a sour, salty beer)." }, } queries = { "q1" : "Who developed the mass-energy equivalence formula?", "q2" : "Which beer is brewed with a large proportion of wheat?" } qrels = { "q1" : {"doc1": 1}, "q2" : {"doc2": 1}, } ``` ### Data Fields Examples from all configurations have the following features: ### Corpus - `corpus`: a `dict` feature representing the document title and passage text, made up of: - `_id`: a `string` feature representing the unique document id - `title`: a `string` feature, denoting the title of the document. - `text`: a `string` feature, denoting the text of the document. ### Queries - `queries`: a `dict` feature representing the query, made up of: - `_id`: a `string` feature representing the unique query id - `text`: a `string` feature, denoting the text of the query. 
### Qrels - `qrels`: a `dict` feature representing the query document relevance judgements, made up of: - `_id`: a `string` feature representing the query id - `_id`: a `string` feature, denoting the document id. - `score`: a `int32` feature, denoting the relevance judgement between query and document. ### Data Splits | Dataset | Website| BEIR-Name | Type | Queries | Corpus | Rel D/Q | Down-load | md5 | | -------- | -----| ---------| --------- | ----------- | ---------| ---------| :----------: | :------:| | MSMARCO | [Homepage](https://microsoft.github.io/msmarco/)| ``msmarco`` | ``train``<br>``dev``<br>``test``| 6,980 | 8.84M | 1.1 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/msmarco.zip) | ``444067daf65d982533ea17ebd59501e4`` | | TREC-COVID | [Homepage](https://ir.nist.gov/covidSubmit/index.html)| ``trec-covid``| ``test``| 50| 171K| 493.5 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/trec-covid.zip) | ``ce62140cb23feb9becf6270d0d1fe6d1`` | | NFCorpus | [Homepage](https://www.cl.uni-heidelberg.de/statnlpgroup/nfcorpus/) | ``nfcorpus`` | ``train``<br>``dev``<br>``test``| 323 | 3.6K | 38.2 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/nfcorpus.zip) | ``a89dba18a62ef92f7d323ec890a0d38d`` | | BioASQ | [Homepage](http://bioasq.org) | ``bioasq``| ``train``<br>``test`` | 500 | 14.91M | 8.05 | No | [How to Reproduce?](https://github.com/UKPLab/beir/blob/main/examples/dataset#2-bioasq) | | NQ | [Homepage](https://ai.google.com/research/NaturalQuestions) | ``nq``| ``train``<br>``test``| 3,452 | 2.68M | 1.2 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/nq.zip) | ``d4d3d2e48787a744b6f6e691ff534307`` | | HotpotQA | [Homepage](https://hotpotqa.github.io) | ``hotpotqa``| ``train``<br>``dev``<br>``test``| 7,405 | 5.23M | 2.0 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/hotpotqa.zip) | ``f412724f78b0d91183a0e86805e16114`` | | FiQA-2018 | [Homepage](https://sites.google.com/view/fiqa/) | ``fiqa`` | ``train``<br>``dev``<br>``test``| 648 | 57K | 2.6 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/fiqa.zip) | ``17918ed23cd04fb15047f73e6c3bd9d9`` | | Signal-1M(RT) | [Homepage](https://research.signal-ai.com/datasets/signal1m-tweetir.html)| ``signal1m`` | ``test``| 97 | 2.86M | 19.6 | No | [How to Reproduce?](https://github.com/UKPLab/beir/blob/main/examples/dataset#4-signal-1m) | | TREC-NEWS | [Homepage](https://trec.nist.gov/data/news2019.html) | ``trec-news`` | ``test``| 57 | 595K | 19.6 | No | [How to Reproduce?](https://github.com/UKPLab/beir/blob/main/examples/dataset#1-trec-news) | | ArguAna | [Homepage](http://argumentation.bplaced.net/arguana/data) | ``arguana``| ``test`` | 1,406 | 8.67K | 1.0 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/arguana.zip) | ``8ad3e3c2a5867cdced806d6503f29b99`` | | Touche-2020| [Homepage](https://webis.de/events/touche-20/shared-task-1.html) | ``webis-touche2020``| ``test``| 49 | 382K | 19.0 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/webis-touche2020.zip) | ``46f650ba5a527fc69e0a6521c5a23563`` | | CQADupstack| [Homepage](http://nlp.cis.unimelb.edu.au/resources/cqadupstack/) | ``cqadupstack``| ``test``| 13,145 | 457K | 1.4 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/cqadupstack.zip) | ``4e41456d7df8ee7760a7f866133bda78`` | | Quora| 
[Homepage](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs) | ``quora``| ``dev``<br>``test``| 10,000 | 523K | 1.6 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/quora.zip) | ``18fb154900ba42a600f84b839c173167`` | | DBPedia | [Homepage](https://github.com/iai-group/DBpedia-Entity/) | ``dbpedia-entity``| ``dev``<br>``test``| 400 | 4.63M | 38.2 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/dbpedia-entity.zip) | ``c2a39eb420a3164af735795df012ac2c`` | | SCIDOCS| [Homepage](https://allenai.org/data/scidocs) | ``scidocs``| ``test``| 1,000 | 25K | 4.9 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/scidocs.zip) | ``38121350fc3a4d2f48850f6aff52e4a9`` | | FEVER | [Homepage](http://fever.ai) | ``fever``| ``train``<br>``dev``<br>``test``| 6,666 | 5.42M | 1.2| [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/fever.zip) | ``5a818580227bfb4b35bb6fa46d9b6c03`` | | Climate-FEVER| [Homepage](http://climatefever.ai) | ``climate-fever``|``test``| 1,535 | 5.42M | 3.0 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/climate-fever.zip) | ``8b66f0a9126c521bae2bde127b4dc99d`` | | SciFact| [Homepage](https://github.com/allenai/scifact) | ``scifact``| ``train``<br>``test``| 300 | 5K | 1.1 | [Link](https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/scifact.zip) | ``5f7d1de60b170fc8027bb7898e2efca1`` | | Robust04 | [Homepage](https://trec.nist.gov/data/robust/04.guidelines.html) | ``robust04``| ``test``| 249 | 528K | 69.9 | No | [How to Reproduce?](https://github.com/UKPLab/beir/blob/main/examples/dataset#3-robust04) | ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information Cite as: ``` @inproceedings{ thakur2021beir, title={{BEIR}: A Heterogeneous Benchmark for Zero-shot Evaluation of Information Retrieval Models}, author={Nandan Thakur and Nils Reimers and Andreas R{\"u}ckl{\'e} and Abhishek Srivastava and Iryna Gurevych}, booktitle={Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2)}, year={2021}, url={https://openreview.net/forum?id=wCu6T5xFjeJ} } ``` ### Contributions Thanks to [@Nthakur20](https://github.com/Nthakur20) for adding this dataset.
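As a usage sketch, the preprocessed datasets listed in the Data Splits table can be loaded with the companion `beir` package (assuming `pip install beir`; the SCIDOCS URL below is copied from the table, and the output directory name is arbitrary):

```python
from beir import util
from beir.datasets.data_loader import GenericDataLoader

# Download and unzip the preprocessed SCIDOCS dataset (URL from the table above).
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/scidocs.zip"
data_path = util.download_and_unzip(url, "datasets")

# corpus: {doc_id: {"title": ..., "text": ...}}, queries: {query_id: text},
# qrels: {query_id: {doc_id: score}} -- matching the format described above.
corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split="test")
print(len(corpus), len(queries))
```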
[ -0.5227212905883789, -0.5249219536781311, 0.14435674250125885, 0.04820423573255539, 0.055916160345077515, 0.0011022627586498857, -0.1081070527434349, -0.24874727427959442, 0.28598034381866455, 0.07840226590633392, -0.45233607292175293, -0.7186435461044312, -0.347678542137146, 0.20300328731...
null
null
null
null
null
null
null
null
null
null
null
null
null
Rami/adhd_question
Rami
2022-07-06T17:03:16Z
12
0
null
[ "license:mit", "region:us" ]
2022-07-06T17:03:16Z
2022-06-18T16:17:20.000Z
2022-06-18T16:17:20
--- license: mit ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
nateraw/lung-cancer
nateraw
2022-10-25T10:32:46Z
12
1
null
[ "license:cc-by-nc-sa-4.0", "region:us" ]
2022-10-25T10:32:46Z
2022-06-21T23:57:00.000Z
2022-06-21T23:57:00
--- kaggle_id: nancyalaswad90/lung-cancer license: - cc-by-nc-sa-4.0 --- # Dataset Card for Lung Cancer ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://kaggle.com/datasets/nancyalaswad90/lung-cancer - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The effectiveness of the cancer prediction system helps people to learn their cancer risk at low cost, and it also helps them to take the appropriate decision based on their cancer risk status. The data is collected from the online lung cancer prediction system website. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators This dataset was shared by [@nancyalaswad90](https://kaggle.com/nancyalaswad90) ### Licensing Information The license for this dataset is cc-by-nc-sa-4.0 ### Citation Information ```bibtex [More Information Needed] ``` ### Contributions [More Information Needed]
[ -0.14782731235027313, -0.47625458240509033, 0.2678765058517456, 0.2114071100950241, -0.36319661140441895, -0.0058837030082941055, -0.070363849401474, -0.09739323705434799, 0.49672695994377136, 0.9473801851272583, -0.8936583995819092, -1.215012550354004, -0.9876242280006409, -0.015319959260...
null
null
null
null
null
null
null
null
null
null
null
null
null
Nexdata/Japanese_Speech_Data
Nexdata
2023-08-28T08:57:36Z
12
0
null
[ "region:us" ]
2023-08-28T08:57:36Z
2022-06-22T08:05:33.000Z
2022-06-22T08:05:33
--- YAML tags: - copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging --- # Dataset Card for Nexdata/Japanese_Speech_Data ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://www.nexdata.ai/datasets/934?source=Huggingface - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary 1006 Japanese native speakers participated in the recording, coming from eastern, western, and Kyushu regions, with the eastern region accounting for the largest proportion. The recording content is rich and all texts have been manually transcribed with high accuracy. For more details, please refer to the link: https://www.nexdata.ai/datasets/934?source=Huggingface ### Supported Tasks and Leaderboards automatic-speech-recognition, audio-speaker-identification: The dataset can be used to train a model for Automatic Speech Recognition (ASR). ### Languages Japanese ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information Commercial License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing ### Citation Information [More Information Needed] ### Contributions
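Since the corpus itself is distributed commercially and not bundled here, a hedged sketch of the supported ASR task can only use stand-ins: a publicly available multilingual model and a hypothetical local recording in place of the Nexdata audio.

```python
from transformers import pipeline

# Stand-ins: "openai/whisper-small" is an illustrative Japanese-capable model,
# and "sample_utterance.wav" is a hypothetical local file, not part of this repo.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")

print(asr("sample_utterance.wav")["text"])  # transcription of the utterance
```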
[ -0.5477847456932068, -0.7035501599311829, -0.009934697300195694, 0.25297826528549194, -0.10764628648757935, 0.07681433856487274, -0.4113163948059082, -0.39702796936035156, 0.6636462807655334, 0.7654150128364563, -0.8229421377182007, -0.9672282934188843, -0.6945710778236389, 0.2306799888610...
null
null
null
null
null
null
null
null
null
null
null
null
null
phihung/titanic
phihung
2022-06-22T16:25:32Z
12
1
null
[ "license:other", "region:us" ]
2022-06-22T16:25:32Z
2022-06-22T16:16:15.000Z
2022-06-22T16:16:15
--- license: other --- The legendary Titanic dataset from [this](https://www.kaggle.com/competitions/titanic/overview) Kaggle competition.
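A minimal first look at the data, assuming `train.csv` has been downloaded from the linked competition page (the `Sex`, `Pclass`, and `Survived` columns are part of the standard Kaggle release):

```python
import pandas as pd

# Assumes train.csv was downloaded from the Kaggle competition page.
df = pd.read_csv("train.csv")

# The classic first cut: survival rate by sex and passenger class.
print(df.groupby(["Sex", "Pclass"])["Survived"].mean())
```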
[ -0.2752212584018707, -0.47802111506462097, 0.18675293028354645, 0.10365166515111923, -0.3161486089229584, 0.23943987488746643, 0.6401233673095703, -0.07817454636096954, 0.6244029998779297, 1.059063196182251, -0.6699391007423401, -0.31781989336013794, -0.1658225655555725, -0.343364417552948...
null
null
null
null
null
null
null
null
null
null
null
null
null
GEM-submissions/lewtun__this-is-another-test-name__1655982268
GEM-submissions
2022-06-23T11:04:35Z
12
0
null
[ "benchmark:gem", "evaluation", "benchmark", "region:us" ]
2022-06-23T11:04:35Z
2022-06-23T11:04:31.000Z
2022-06-23T11:04:31
--- benchmark: gem type: prediction submission_name: This is another test name tags: - evaluation - benchmark --- # GEM Submission Submission name: This is another test name
[ -0.014066352508962154, -0.8986244201660156, 0.6770598888397217, -0.02584214322268963, -0.09468409419059753, 0.4287986755371094, 0.05152108520269394, 0.10768046975135803, 0.46615633368492126, 0.49758970737457275, -1.1106656789779663, 0.015152091160416603, -0.5872305631637573, -0.20092248916...
null
null
null
null
null
null
null
null
null
null
null
null
null
LHF/escorpius
LHF
2023-01-05T10:55:48Z
12
12
null
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "multilinguality:monolingual", "size_categories:100M<n<1B", "source_datasets:original", "language:es", "license:cc-by-nc-nd-4.0", "arxiv:2206.15147", "region:us" ]
2023-01-05T10:55:48Z
2022-06-24T20:58:40.000Z
2022-06-24T20:58:40
--- license: cc-by-nc-nd-4.0 language: - es multilinguality: - monolingual size_categories: - 100M<n<1B source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling --- # esCorpius: A Massive Spanish Crawling Corpus ## Introduction In recent years, Transformer-based models have led to significant advances in language modelling for natural language processing. However, they require a vast amount of data to be (pre-)trained and there is a lack of corpora in languages other than English. Recently, several initiatives have presented multilingual datasets obtained from automatic web crawling. However, the results in Spanish present important shortcomings, as they are either too small in comparison with other languages, or of low quality derived from sub-optimal cleaning and deduplication. In this work, we introduce esCorpius, a Spanish crawling corpus obtained from nearly 1 Pb of Common Crawl data. It is the most extensive corpus in Spanish with this level of quality in the extraction, purification and deduplication of web textual content. Our data curation process involves a novel highly parallel cleaning pipeline and encompasses a series of deduplication mechanisms that together ensure the integrity of both document and paragraph boundaries. Additionally, we maintain both the source web page URL and the WARC shard origin URL in order to comply with EU regulations. esCorpius has been released under the CC BY-NC-ND 4.0 license. ## Statistics | **Corpus** | OSCAR<br>22.01 | mC4 | CC-100 | ParaCrawl<br>v9 | esCorpius<br>(ours) | |-------------------------|----------------|--------------|-----------------|-----------------|-------------------------| | **Size (ES)** | 381.9 GB | 1,600.0 GB | 53.3 GB | 24.0 GB | 322.5 GB | | **Docs (ES)** | 51M | 416M | - | - | 104M | | **Words (ES)** | 42,829M | 433,000M | 9,374M | 4,374M | 50,773M | | **Lang.<br>identifier** | fastText | CLD3 | fastText | CLD2 | CLD2 + fastText | | **Elements** | Document | Document | Document | Sentence | Document and paragraph | | **Parsing quality** | Medium | Low | Medium | High | High | | **Cleaning quality** | Low | No cleaning | Low | High | High | | **Deduplication** | No | No | No | Bicleaner | dLHF | | **Language** | Multilingual | Multilingual | Multilingual | Multilingual | Spanish | | **License** | CC-BY-4.0 | ODC-By-v1.0 | Common<br>Crawl | CC0 | CC-BY-NC-ND | ## Citation Link to the paper: https://www.isca-speech.org/archive/pdfs/iberspeech_2022/gutierrezfandino22_iberspeech.pdf / https://arxiv.org/abs/2206.15147 Cite this work: ``` @inproceedings{gutierrezfandino22_iberspeech, author={Asier Gutiérrez-Fandiño and David Pérez-Fernández and Jordi Armengol-Estapé and David Griol and Zoraida Callejas}, title={{esCorpius: A Massive Spanish Crawling Corpus}}, year=2022, booktitle={Proc. IberSPEECH 2022}, pages={126--130}, doi={10.21437/IberSPEECH.2022-26} } ``` ## Disclaimer We did not apply any kind of filtering and/or censorship to the corpus. We expect users to do so applying their own methods. We are not liable for any misuse of the corpus.
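Given the 100M<n<1B size category, streaming is the practical way to sample the corpus without downloading all ~322 GB. A minimal sketch, assuming the repo exposes a default configuration with a `train` split:

```python
from datasets import load_dataset

# Streaming avoids materializing the full corpus on disk.
ds = load_dataset("LHF/escorpius", split="train", streaming=True)

for i, doc in enumerate(ds):
    print(doc)  # inspect the available fields (text, source URLs, ...)
    if i == 2:
        break
```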
[ -0.4888957738876343, -0.6505939960479736, 0.34118887782096863, 0.5600492358207703, -0.21963639557361603, 0.4849811792373657, -0.08707016706466675, -0.4715771973133087, 0.7538990378379822, 0.5693415999412537, -0.32007843255996704, -0.8294093012809753, -0.8137730956077576, 0.3211983144283294...
null
null
null
null
null
null
null
null
null
null
null
null
null
autoevaluate/autoeval-staging-eval-project-d60b4e7e-7574888
autoevaluate
2022-06-26T20:11:48Z
12
0
null
[ "autotrain", "evaluation", "region:us" ]
2022-06-26T20:11:48Z
2022-06-26T20:08:59.000Z
2022-06-26T20:08:59
--- type: predictions tags: - autotrain - evaluation datasets: - xtreme eval_info: task: entity_extraction model: OneFly/xlm-roberta-base-finetuned-panx-de metrics: [] dataset_name: xtreme dataset_config: PAN-X.de dataset_split: test col_mapping: tokens: tokens tags: ner_tags --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Token Classification * Model: OneFly/xlm-roberta-base-finetuned-panx-de * Dataset: xtreme To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
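A hedged sketch of re-running this evaluation locally with the task, model, and dataset named above; the exact metric computation AutoTrain uses is not part of this card:

```python
from datasets import load_dataset
from transformers import pipeline

data = load_dataset("xtreme", "PAN-X.de", split="test")
ner = pipeline("token-classification",
               model="OneFly/xlm-roberta-base-finetuned-panx-de")

# PAN-X examples are pre-tokenized; joining with spaces is a rough
# approximation for feeding the text pipeline.
text = " ".join(data[0]["tokens"])
print(ner(text))
```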
[ -0.4268358051776886, -0.10946956276893616, 0.1572227030992508, 0.1181664913892746, -0.04665398970246315, -0.10113059729337692, 0.12062431126832962, -0.4135160446166992, 0.2554154098033905, 0.5470842123031616, -1.061152458190918, -0.40505099296569824, -0.6341866850852966, -0.125387713313102...
null
null
null
null
null
null
null
null
null
null
null
null
null
loubnabnl/github-code-duplicate
loubnabnl
2022-06-27T20:02:27Z
12
0
null
[ "region:us" ]
2022-06-27T20:02:27Z
2022-06-27T15:12:35.000Z
2022-06-27T15:12:35
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
loubnabnl/github-clean-v1
loubnabnl
2022-06-28T00:59:02Z
12
0
null
[ "region:us" ]
2022-06-28T00:59:02Z
2022-06-27T20:29:39.000Z
2022-06-27T20:29:39
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
autoevaluate/autoeval-staging-eval-project-34433c04-8625146
autoevaluate
2022-06-29T00:39:30Z
12
0
null
[ "autotrain", "evaluation", "region:us" ]
2022-06-29T00:39:30Z
2022-06-29T00:38:57.000Z
2022-06-29T00:38:57
--- type: predictions tags: - autotrain - evaluation datasets: - lewtun/dog_food eval_info: task: image_multi_class_classification model: abhishek/convnext-tiny-finetuned-dogfood metrics: [] dataset_name: lewtun/dog_food dataset_config: lewtun--dog_food dataset_split: test col_mapping: image: image target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Image Classification * Model: abhishek/convnext-tiny-finetuned-dogfood * Dataset: lewtun/dog_food To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@haesun](https://huggingface.co/haesun) for evaluating this model.
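A hedged sketch of spot-checking the model on the dataset's test split, using the column mapping given above (image -> image, label -> target); note the gold labels are integer class ids while the pipeline returns label strings:

```python
from datasets import load_dataset
from transformers import pipeline

data = load_dataset("lewtun/dog_food", split="test")
clf = pipeline("image-classification",
               model="abhishek/convnext-tiny-finetuned-dogfood")

for example in data.select(range(3)):
    preds = clf(example["image"])  # PIL image per the "image" column
    print(preds[0]["label"], "vs gold class id", example["label"])
```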
[ -0.4464048147201538, -0.1881573349237442, 0.0880771353840828, 0.07559619843959808, 0.0164920873939991, -0.2743195593357086, 0.043282363563776016, -0.5651324987411499, 0.1125439703464508, 0.32909438014030457, -0.7595334649085999, -0.14278081059455872, -0.6406891345977783, -0.031551696360111...
null
null
null
null
null
null
null
null
null
null
null
null
null
autoevaluate/autoeval-staging-eval-project-00ac2adb-9115197
autoevaluate
2022-06-29T22:41:58Z
12
0
null
[ "autotrain", "evaluation", "region:us" ]
2022-06-29T22:41:58Z
2022-06-29T22:40:57.000Z
2022-06-29T22:40:57
--- type: predictions tags: - autotrain - evaluation datasets: - cifar10 eval_info: task: image_multi_class_classification model: abhishek/autotrain_cifar10_vit_base metrics: [] dataset_name: cifar10 dataset_config: plain_text dataset_split: test col_mapping: image: img target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Image Classification * Model: abhishek/autotrain_cifar10_vit_base * Dataset: cifar10 To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@davidberg](https://huggingface.co/davidberg) for evaluating this model.
[ -0.5799369812011719, -0.13354820013046265, 0.1372492015361786, 0.13761301338672638, -0.043086547404527664, -0.1775314211845398, 0.00884221587330103, -0.5936290621757507, 0.07550720870494843, 0.22760292887687683, -0.8252488970756531, -0.15424539148807526, -0.6409091353416443, -0.13765422999...
null
null
null
null
null
null
null
null
null
null
null
null
null
Paul/hatecheck-mandarin
Paul
2022-07-05T10:32:33Z
12
2
null
[ "task_categories:text-classification", "task_ids:hate-speech-detection", "annotations_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:zh", "license:cc-by-4.0", "arxiv:2206.09917", "regi...
2022-07-05T10:32:33Z
2022-07-05T10:31:28.000Z
2022-07-05T10:31:28
--- annotations_creators: - crowdsourced language_creators: - expert-generated language: - zh license: - cc-by-4.0 multilinguality: - monolingual pretty_name: Mandarin HateCheck size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - hate-speech-detection --- # Dataset Card for Multilingual HateCheck ## Dataset Description Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection models in 10 different languages: Arabic, Dutch, French, German, Hindi, Italian, Mandarin, Polish, Portuguese and Spanish. For each language, there are 25+ functional tests that correspond to distinct types of hate and challenging non-hate. This allows for targeted diagnostic insights into model performance. For more details, please refer to our paper about MHC, published at the 2022 Workshop on Online Abuse and Harms (WOAH) at NAACL 2022. If you are using MHC, please cite our work! - **Paper:** Röttger et al. (2022) - Multilingual HateCheck: Functional Tests for Multilingual Hate Speech Detection Models. https://arxiv.org/abs/2206.09917 - **Repository:** https://github.com/rewire-online/multilingual-hatecheck - **Point of Contact:** paul@rewire.online ## Dataset Structure The csv format mostly matches the original HateCheck data, with some adjustments for specific languages. **mhc_case_id** The test case ID that is unique to each test case across languages (e.g., "mandarin-1305") **functionality** The shorthand for the functionality tested by the test case (e.g., "target_obj_nh"). The same functionalities are tested in all languages, except for Mandarin and Arabic, where non-Latin script required adapting the tests for spelling variations. **test_case** The test case text. **label_gold** The gold standard label ("hateful" or "non-hateful") of the test case. All test cases within a given functionality have the same gold standard label. **target_ident** Where applicable, the protected group that is targeted or referenced in the test case. All HateChecks cover seven target groups, but their composition varies across languages. **ref_case_id** For hateful cases, where applicable, the ID of the hateful case which was perturbed to generate this test case. For non-hateful cases, where applicable, the ID of the hateful case which is contrasted by this test case. **ref_templ_id** The equivalent to ref_case_id, but for template IDs. **templ_id** The ID of the template from which the test case was generated. **case_templ** The template from which the test case was generated (where applicable). **gender_male** and **gender_female** For gender-inflected languages (French, Spanish, Portuguese, Hindi, Arabic, Italian, Polish, German), only for cases where gender inflection is relevant, separate entries for gender_male and gender_female replace case_templ. **label_annotated** A list of labels given by the three annotators who reviewed the test case (e.g., "['hateful', 'hateful', 'hateful']"). **label_annotated_maj** The majority vote of the three annotators (e.g., "hateful"). In some cases this differs from the gold label given by our language experts. **disagreement_in_case** True if label_annotated_maj does not match label_gold for the entry. **disagreement_in_template** True if the test case is generated from an IDENT template and there is at least one case with disagreement_in_case generated from the same template. This can be used to exclude entire templates from MHC.
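The annotation fields described above support a simple agreement analysis. A minimal sketch, assuming the repo's csv is exposed as a `test` split (the split name is an assumption):

```python
from datasets import load_dataset

# Assumption: the csv loads as a "test" split.
df = load_dataset("Paul/hatecheck-mandarin", split="test").to_pandas()

# Per-functionality rate at which the annotator majority vote
# (label_annotated_maj) disagrees with the expert gold label (label_gold).
disagreement = (
    (df["label_annotated_maj"] != df["label_gold"])
    .groupby(df["functionality"])
    .mean()
    .sort_values(ascending=False)
)
print(disagreement.head())
```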
[ -0.6419409513473511, -0.7158888578414917, -0.05510092154145241, 0.09203927218914032, -0.11549574881792068, 0.10751984268426895, -0.030292538926005363, -0.5101843476295471, 0.39948996901512146, 0.3274587094783783, -0.7589273452758789, -0.7721040844917297, -0.5623311400413513, 0.460262417793...
null
null
null
null
null
null
null
null
null
null
null
null
null
Paul/hatecheck-dutch
Paul
2022-07-05T10:41:31Z
12
1
null
[ "task_categories:text-classification", "task_ids:hate-speech-detection", "annotations_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:nl", "license:cc-by-4.0", "arxiv:2206.09917", "regi...
2022-07-05T10:41:31Z
2022-07-05T10:40:49.000Z
2022-07-05T10:40:49
--- annotations_creators: - crowdsourced language_creators: - expert-generated language: - nl license: - cc-by-4.0 multilinguality: - monolingual pretty_name: Dutch HateCheck size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - hate-speech-detection --- # Dataset Card for Multilingual HateCheck ## Dataset Description Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection models in 10 different languages: Arabic, Dutch, French, German, Hindi, Italian, Mandarin, Polish, Portuguese and Spanish. For each language, there are 25+ functional tests that correspond to distinct types of hate and challenging non-hate. This allows for targeted diagnostic insights into model performance. For more details, please refer to our paper about MHC, published at the 2022 Workshop on Online Abuse and Harms (WOAH) at NAACL 2022. If you are using MHC, please cite our work! - **Paper:** Röttger et al. (2022) - Multilingual HateCheck: Functional Tests for Multilingual Hate Speech Detection Models. https://arxiv.org/abs/2206.09917 - **Repository:** https://github.com/rewire-online/multilingual-hatecheck - **Point of Contact:** paul@rewire.online ## Dataset Structure The csv format mostly matches the original HateCheck data, with some adjustments for specific languages. **mhc_case_id** The test case ID that is unique to each test case across languages (e.g., "mandarin-1305") **functionality** The shorthand for the functionality tested by the test case (e.g., "target_obj_nh"). The same functionalities are tested in all languages, except for Mandarin and Arabic, where non-Latin script required adapting the tests for spelling variations. **test_case** The test case text. **label_gold** The gold standard label ("hateful" or "non-hateful") of the test case. All test cases within a given functionality have the same gold standard label. **target_ident** Where applicable, the protected group that is targeted or referenced in the test case. All HateChecks cover seven target groups, but their composition varies across languages. **ref_case_id** For hateful cases, where applicable, the ID of the hateful case which was perturbed to generate this test case. For non-hateful cases, where applicable, the ID of the hateful case which is contrasted by this test case. **ref_templ_id** The equivalent to ref_case_id, but for template IDs. **templ_id** The ID of the template from which the test case was generated. **case_templ** The template from which the test case was generated (where applicable). **gender_male** and **gender_female** For gender-inflected languages (French, Spanish, Portuguese, Hindi, Arabic, Italian, Polish, German), only for cases where gender inflection is relevant, separate entries for gender_male and gender_female replace case_templ. **label_annotated** A list of labels given by the three annotators who reviewed the test case (e.g., "['hateful', 'hateful', 'hateful']"). **label_annotated_maj** The majority vote of the three annotators (e.g., "hateful"). In some cases this differs from the gold label given by our language experts. **disagreement_in_case** True if label_annotated_maj does not match label_gold for the entry. **disagreement_in_template** True if the test case is generated from an IDENT template and there is at least one case with disagreement_in_case generated from the same template. This can be used to exclude entire templates from MHC.
[ -0.6419409513473511, -0.7158888578414917, -0.05510092154145241, 0.09203927218914032, -0.11549574881792068, 0.10751984268426895, -0.030292538926005363, -0.5101843476295471, 0.39948996901512146, 0.3274587094783783, -0.7589273452758789, -0.7721040844917297, -0.5623311400413513, 0.460262417793...
null
null
null
null
null
null
null
null
null
null
null
null
null
Paul/hatecheck-arabic
Paul
2022-07-05T10:43:02Z
12
1
null
[ "task_categories:text-classification", "task_ids:hate-speech-detection", "annotations_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:ar", "license:cc-by-4.0", "arxiv:2206.09917", "regi...
2022-07-05T10:43:02Z
2022-07-05T10:42:16.000Z
2022-07-05T10:42:16
--- annotations_creators: - crowdsourced language_creators: - expert-generated language: - ar license: - cc-by-4.0 multilinguality: - monolingual pretty_name: Arabic HateCheck size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - hate-speech-detection --- # Dataset Card for Multilingual HateCheck ## Dataset Description Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection models in 10 different languages: Arabic, Dutch, French, German, Hindi, Italian, Mandarin, Polish, Portuguese and Spanish. For each language, there are 25+ functional tests that correspond to distinct types of hate and challenging non-hate. This allows for targeted diagnostic insights into model performance. For more details, please refer to our paper about MHC, published at the 2022 Workshop on Online Abuse and Harms (WOAH) at NAACL 2022. If you are using MHC, please cite our work! - **Paper:** Röttger et al. (2022) - Multilingual HateCheck: Functional Tests for Multilingual Hate Speech Detection Models. https://arxiv.org/abs/2206.09917 - **Repository:** https://github.com/rewire-online/multilingual-hatecheck - **Point of Contact:** paul@rewire.online ## Dataset Structure The csv format mostly matches the original HateCheck data, with some adjustments for specific languages. **mhc_case_id** The test case ID that is unique to each test case across languages (e.g., "mandarin-1305") **functionality** The shorthand for the functionality tested by the test case (e.g., "target_obj_nh"). The same functionalities are tested in all languages, except for Mandarin and Arabic, where non-Latin script required adapting the tests for spelling variations. **test_case** The test case text. **label_gold** The gold standard label ("hateful" or "non-hateful") of the test case. All test cases within a given functionality have the same gold standard label. **target_ident** Where applicable, the protected group that is targeted or referenced in the test case. All HateChecks cover seven target groups, but their composition varies across languages. **ref_case_id** For hateful cases, where applicable, the ID of the hateful case which was perturbed to generate this test case. For non-hateful cases, where applicable, the ID of the hateful case which is contrasted by this test case. **ref_templ_id** The equivalent to ref_case_id, but for template IDs. **templ_id** The ID of the template from which the test case was generated. **case_templ** The template from which the test case was generated (where applicable). **gender_male** and **gender_female** For gender-inflected languages (French, Spanish, Portuguese, Hindi, Arabic, Italian, Polish, German), only for cases where gender inflection is relevant, separate entries for gender_male and gender_female replace case_templ. **label_annotated** A list of labels given by the three annotators who reviewed the test case (e.g., "['hateful', 'hateful', 'hateful']"). **label_annotated_maj** The majority vote of the three annotators (e.g., "hateful"). In some cases this differs from the gold label given by our language experts. **disagreement_in_case** True if label_annotated_maj does not match label_gold for the entry. **disagreement_in_template** True if the test case is generated from an IDENT template and there is at least one case with disagreement_in_case generated from the same template. This can be used to exclude entire templates from MHC.
[ -0.6419409513473511, -0.7158888578414917, -0.05510092154145241, 0.09203927218914032, -0.11549574881792068, 0.10751984268426895, -0.030292538926005363, -0.5101843476295471, 0.39948996901512146, 0.3274587094783783, -0.7589273452758789, -0.7721040844917297, -0.5623311400413513, 0.460262417793...
null
null
null
null
null
null
null
null
null
null
null
null
null
loubnabnl/github-small-near-dedup
loubnabnl
2022-07-08T10:28:11Z
12
0
null
[ "region:us" ]
2022-07-08T10:28:11Z
2022-07-06T14:48:15.000Z
2022-07-06T14:48:15
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
MicPie/unpredictable_cluster02
MicPie
2022-08-04T19:44:14Z
12
0
null
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
2022-08-04T19:44:14Z
2022-07-08T18:25:06.000Z
2022-07-08T18:25:06
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster02 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster02" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': url to the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
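Since each task is a jsonlines file with 'task', 'input', 'options', and 'output' fields as documented above, assembling a few-shot prompt is a matter of concatenating rendered examples. A minimal sketch with a hypothetical file name, assuming 'input' and 'output' are strings:

```python
import json

# "task.jsonl" is a hypothetical path to one UnpredicTable task file.
with open("task.jsonl") as f:
    examples = [json.loads(line) for line in f]

def render(ex):
    opts = f" Options: {', '.join(ex['options'])}." if ex.get("options") else ""
    return f"Input: {ex['input']}.{opts}\nOutput: {ex['output']}"

# k solved demonstrations followed by a query without its answer.
k = 3
prompt = "\n\n".join(render(ex) for ex in examples[:k])
prompt += f"\n\nInput: {examples[k]['input']}.\nOutput:"
print(prompt)
```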
[ -0.5546532869338989, -0.5640119910240173, 0.47088226675987244, 0.32676172256469727, 0.08714975416660309, 0.15217609703540802, -0.14034715294837952, -0.6151542663574219, 0.5133213996887207, 0.2814868986606598, -1.011489748954773, -0.653624415397644, -0.6682239770889282, 0.17284812033176422,...
null
null
null
null
null
null
null
null
null
null
null
null
null
Sreyan88/librispeech_asr
Sreyan88
2022-07-09T11:34:31Z
12
0
null
[ "region:us" ]
2022-07-09T11:34:31Z
2022-07-09T11:34:20.000Z
2022-07-09T11:34:20
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
saadob12/chart-to-text
saadob12
2022-07-10T10:09:33Z
12
3
null
[ "arxiv:2203.06486", "region:us" ]
2022-07-10T10:09:33Z
2022-07-09T12:10:51.000Z
2022-07-09T12:10:51
This dataset consists only of the linearized underlying data tables of charts and their corresponding summaries. Model that uses this dataset: https://huggingface.co/saadob12/t5_C2T_big ## Created By: Kanthara, S., Leong, R. T. K., Lin, X., Masry, A., Thakkar, M., Hoque, E., & Joty, S. (2022). Chart-to-Text: A Large-Scale Benchmark for Chart Summarization. arXiv preprint arXiv:2203.06486. **Paper**: https://arxiv.org/abs/2203.06486 **Original github repo**: https://github.com/vis-nlp/Chart-to-text # Abstract from the Paper Charts are commonly used for exploring data and communicating insights. Generating natural language summaries from charts can be very helpful for people in inferring key insights that would otherwise require a lot of cognitive and perceptual effort. We present Chart-to-text, a large-scale benchmark with two datasets and a total of 44,096 charts covering a wide range of topics and chart types. We explain the dataset construction process and analyze the datasets. We also introduce a number of state-of-the-art neural models as baselines that utilize image captioning and data-to-text generation techniques to tackle two problem variations: one assumes the underlying data table of the chart is available while the other needs to extract data from chart images. Our analysis with automatic and human evaluation shows that while our best models usually generate fluent summaries and yield reasonable BLEU scores, they also suffer from hallucinations and factual errors as well as difficulties in correctly explaining complex patterns and trends in charts. ### Note The original paper published two sub-datasets, one collected from Statista and the other from Pew. The dataset uploaded here is from Statista. Images can be downloaded from the github repo mentioned above. # Language The data and the summaries are in English. # Dataset split | train | valid | test | |:---:|:---:|:---:| | 24367 | 5222 | 5222 | **Name of Contributor:** Saad Obaid ul Islam
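A hedged sketch of generating a summary with the linked model; the exact linearization format the model expects is an assumption here and should be checked against the model card:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("saadob12/t5_C2T_big")
model = AutoModelForSeq2SeqLM.from_pretrained("saadob12/t5_C2T_big")

# Hypothetical linearized chart table; the real format is defined upstream.
table = "Year | Revenue (million USD) 2019 | 10.2 2020 | 12.5 2021 | 15.1"
inputs = tok(table, return_tensors="pt", truncation=True)
ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tok.decode(ids[0], skip_special_tokens=True))
```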
[ -0.2934081256389618, -0.6616927981376648, 0.1048627570271492, 0.1421089917421341, -0.5347809791564941, -0.018957672640681267, -0.29482728242874146, -0.32267260551452637, 0.3301694095134735, 0.6661865711212158, -0.34933146834373474, -0.6963697671890259, -0.6475691199302673, 0.06311405450105...
null
null
null
null
null
null
null
null
null
null
null
null
null