id
stringlengths
2
115
lastModified
stringlengths
24
24
tags
list
author
stringlengths
2
42
description
stringlengths
0
6.67k
citation
stringlengths
0
10.7k
likes
int64
0
3.66k
downloads
int64
0
8.89M
created
timestamp[us]
card
stringlengths
11
977k
card_len
int64
11
977k
embeddings
list
shyam-incedoinc/qa-finetune-dataset
2023-10-02T10:33:15.000Z
[ "region:us" ]
shyam-incedoinc
null
null
0
85
2023-10-02T10:32:57
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
csolheim/risk_sig_train_multilabel_OPR
2023-10-04T18:03:10.000Z
[ "region:us" ]
csolheim
null
null
0
85
2023-10-04T04:17:12
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nlewins/fleurs_ceb_to_en
2023-10-10T15:44:08.000Z
[ "region:us" ]
nlewins
null
null
0
85
2023-10-10T15:39:39
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: id dtype: int32 - name: transcription dtype: string - name: language dtype: string - name: audio dtype: audio: sampling_rate: 16000 - name: transcription_en dtype: string - name: audio_en dtype: audio: sampling_rate: 16000 splits: - name: train num_bytes: 4837972886.028 num_examples: 3188 - name: validation num_bytes: 332770769.0 num_examples: 225 - name: test num_bytes: 834809869.0 num_examples: 541 download_size: 5885482902 dataset_size: 6005553524.028 --- # Dataset Card for "fleurs_ceb_to_en_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
947
[ [ -0.042327880859375, -0.0260162353515625, 0.018585205078125, 0.03460693359375, -0.01480865478515625, -0.0158538818359375, 0.0133514404296875, -0.02593994140625, 0.05303955078125, 0.0440673828125, -0.050445556640625, -0.046173095703125, -0.052978515625, -0.016...
DangFutures/instruct_convo
2023-10-19T02:47:35.000Z
[ "region:us" ]
DangFutures
null
null
0
85
2023-10-19T02:24:21
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
alexandrainst/lexdk-open
2023-10-27T20:22:31.000Z
[ "size_categories:10K<n<100K", "language:da", "license:cc0-1.0", "region:us" ]
alexandrainst
null
null
1
85
2023-10-25T09:36:11
--- language: - da license: cc0-1.0 size_categories: - 10K<n<100K pretty_name: Lex.dk Open dataset_info: features: - name: url dtype: string - name: title dtype: string - name: clarification dtype: string - name: authors sequence: string - name: date dtype: string - name: license dtype: string - name: text dtype: string splits: - name: train num_bytes: 18335490 num_examples: 11887 download_size: 10050922 dataset_size: 18335490 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for Lex.dk Open ## Dataset Description - **Point of Contact:** [Dan Saattrup Nielsen](mailto:dan.nielsen@alexandra.dk) - **Size of downloaded dataset files:** 10.05 MB - **Size of the generated dataset:** 18.34 MB - **Total amount of disk used:** 28.39 MB ### Dataset Summary This dataset consists of articles from the Danish encyclopedia [Lex.dk](https://www.lex.dk). Only the articles released with a permissive license are included here, which constitutes about 7.5% of the total amount of articles. ### Languages The dataset is available in Danish (`da`). ## Dataset Structure ### Data Instances - **Size of downloaded dataset files:** 10.05 MB - **Size of the generated dataset:** 18.34 MB - **Total amount of disk used:** 28.39 MB An example from the dataset looks as follows. ``` { 'url': 'https://denstoredanske.lex.dk/Kullmanns_M%C3%B8lle', 'title': 'Kullmanns Mølle', 'clarification': '', 'authors': ['https://brugere.lex.dk/6929'], 'date': '2021-01-20T13:23:20+01:00', 'license': 'fri anvendelse', 'text': 'Kullmanns Mølle er en mølle i Gudhjem, opkaldt efter Matts Kullmann, der byggede møllen i 1893 til sin søn, Christian Kullmann, se Gudhjem Mølle.' } ``` ### Data Fields The data fields are the same among all splits. - `url`: a `string` feature. - `title`: a `string` feature. - `clarification`: a `string` feature. - `authors`: a `list` feature. - `authors`: a `string` feature. - `license`: a `string` feature. - `text`: a `string` feature. 
### Dataset Statistics There are 11,887 samples in the dataset. #### Article Length Distribution ![image/png](https://cdn-uploads.huggingface.co/production/uploads/60d368a613f774189902f555/6TU-GWs59AjhbndOWgvDE.png) ## Additional Information ### Dataset Curators [Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [The Alexandra Institute](https://alexandra.dk/) built the dataset and uploaded it to the Hugging Face Hub. ### Licensing Information The dataset is licensed under the [CC0 license](https://creativecommons.org/share-your-work/public-domain/cc0/).
2,662
[ [ -0.04632568359375, -0.0255889892578125, 0.0272216796875, 0.0095672607421875, -0.031646728515625, -0.00913238525390625, -0.01555633544921875, -0.0241546630859375, 0.037017822265625, 0.0285797119140625, -0.060089111328125, -0.0799560546875, -0.0242919921875, 0...
opus_dogc
2022-11-03T16:07:43.000Z
[ "task_categories:translation", "annotations_creators:no-annotation", "language_creators:expert-generated", "multilinguality:translation", "size_categories:1M<n<10M", "source_datasets:original", "language:ca", "language:es", "license:cc0-1.0", "region:us" ]
null
This is a collection of documents from the Official Journal of the Government of Catalonia, in Catalan and Spanish languages, provided by Antoni Oliver Gonzalez from the Universitat Oberta de Catalunya.
@inproceedings{tiedemann-2012-parallel, title = "Parallel Data, Tools and Interfaces in {OPUS}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)", month = may, year = "2012", address = "Istanbul, Turkey", publisher = "European Language Resources Association (ELRA)", url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf", pages = "2214--2218", abstract = "This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.", }
0
84
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - expert-generated language: - ca - es license: - cc0-1.0 multilinguality: - translation size_categories: - 1M<n<10M source_datasets: - original task_categories: - translation task_ids: [] paperswithcode_id: null pretty_name: OPUS DOGC dataset_info: features: - name: translation dtype: translation: languages: - ca - es config_name: tmx splits: - name: train num_bytes: 1258924464 num_examples: 4763575 download_size: 331724078 dataset_size: 1258924464 --- # Dataset Card for OPUS DOGC ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://opus.nlpl.eu/DOGC.php - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary OPUS DOGC is a collection of documents from the Official Journal of the Government of Catalonia, in Catalan and Spanish languages, provided by Antoni Oliver Gonzalez from the Universitat Oberta de Catalunya. 
### Supported Tasks and Leaderboards [More Information Needed] ### Languages Dataset is multilingual with parallel text in: - Catalan - Spanish ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields A data instance contains the following fields: - `ca`: the Catalan text - `es`: the aligned Spanish text ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information Dataset is in the Public Domain under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/). ### Citation Information ``` @inproceedings{tiedemann-2012-parallel, title = "Parallel Data, Tools and Interfaces in {OPUS}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)", month = may, year = "2012", address = "Istanbul, Turkey", publisher = "European Language Resources Association (ELRA)", url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf", pages = "2214--2218", abstract = "This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. 
The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.", } ``` ### Contributions Thanks to [@albertvillanova](https://github.com/albertvillanova) for adding this dataset.
4,454
[ [ -0.04400634765625, -0.0237274169921875, 0.024017333984375, 0.036224365234375, -0.01934814453125, 0.000026106834411621094, -0.040283203125, -0.0190887451171875, 0.030548095703125, 0.035125732421875, -0.051544189453125, -0.08062744140625, -0.03985595703125, 0....
opus_elhuyar
2022-11-03T16:07:47.000Z
[ "task_categories:translation", "annotations_creators:found", "language_creators:found", "multilinguality:translation", "size_categories:100K<n<1M", "source_datasets:original", "language:es", "language:eu", "license:unknown", "region:us" ]
null
Dataset provided by the foundation Elhuyar, which is having data in languages Spanish to Basque.
@InProceedings{opus:Elhuyar, title = {Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012)}, authors={J. Tiedemann}, year={2012} }
0
84
2022-03-02T23:29:22
--- annotations_creators: - found language_creators: - found language: - es - eu license: - unknown multilinguality: - translation size_categories: - 100K<n<1M source_datasets: - original task_categories: - translation task_ids: [] paperswithcode_id: null pretty_name: OpusElhuyar dataset_info: features: - name: translation dtype: translation: languages: - es - eu config_name: es-eu splits: - name: train num_bytes: 127833939 num_examples: 642348 download_size: 44468751 dataset_size: 127833939 --- # Dataset Card for [opus_elhuyar] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:**[Opus Elhuyar](http://opus.nlpl.eu/Elhuyar.php) - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Dataset provided by the foundation Elhuyar (http://webcorpusak.elhuyar.eus/sarrera_paraleloa.html) and submitted to OPUS by Joseba Garcia Beaumont ### Supported Tasks and Leaderboards The underlying task is machine translation from Spanish to Basque ### Languages 
Spanish to Basque ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012) ### Contributions Thanks to [@spatil6](https://github.com/spatil6) for adding this dataset.
3,319
[ [ -0.02923583984375, -0.041412353515625, 0.0020751953125, 0.0311737060546875, -0.0228729248046875, 0.0039520263671875, -0.036224365234375, -0.0252532958984375, 0.045074462890625, 0.043365478515625, -0.055267333984375, -0.07635498046875, -0.054901123046875, 0.0...
the_pile_openwebtext2
2023-02-24T13:58:16.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_categories:text-classification", "task_ids:language-modeling", "task_ids:masked-language-modeling", "task_ids:text-scoring", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_c...
null
OpenWebText2 is part of EleutherAi/The Pile dataset and is an enhanced version of the original OpenWebTextCorpus covering all Reddit submissions from 2005 up until April 2020, with further months becoming available after the corresponding PushShift dump files are released.
@article{pile, title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling}, author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor}, journal={arXiv preprint arXiv:2101.00027}, year={2020} }
10
84
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - mit multilinguality: - monolingual pretty_name: OpenWebText2 size_categories: - 10M<n<100M source_datasets: - original task_categories: - text-generation - fill-mask - text-classification task_ids: - language-modeling - masked-language-modeling - text-scoring dataset_info: features: - name: title dtype: string - name: text dtype: string config_name: plain_text splits: - name: train num_bytes: 68571017395 num_examples: 17103059 download_size: 29344276480 dataset_size: 68571017395 --- # Dataset Card for the_pile_openwebtext2 ## Table of Contents - [Dataset Card for the_pile_openwebtext2](#dataset-card-for-the_pile_openwebtext2) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [|split|num examples|](#splitnum-examples) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) 
- [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://openwebtext2.readthedocs.io/en/latest/ - **Repository:** [GitHub](https://github.com/EleutherAI/openwebtext2) - **Paper:** https://arxiv.org/abs/2101.00027 - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary OpenWebText2 is part of EleutherAi/The Pile dataset and is an enhanced version of the original OpenWebTextCorpus covering all Reddit submissions from 2005 up until April 2020, with further months becoming available after the corresponding PushShift dump files are released. |download_size|27.3 Gib| |dataset_size|63.8 Gib| ### Supported Tasks and Leaderboards This dataset is used for Language Modeling. ### Languages This dataset is in English. ## Dataset Structure ### Data Instances ``` This example was too long and was cropped: {'title': Xiaomi Mi Note 10 Gearbest Coupon Promo Code [6+128GB] [France Warehouse], 'text': '27% off Xiaomi Mi Note 10 (CC9 Pro) 108MP Penta Camera Mobile Phone Global Version Online Smartphone – Black Gearbest Coupon Promo Code\n\nGearbest Coupon Price :$439.99\n\nRegular Price : $603.19 Your Save : $163.20 Coupon Limit: 100 times Warehouse: France Expires : September 30, 2020 Coupon Valid for...', 'reddit_scores': [6],} ``` ### Data Fields - `title`: title of the web page - `text`: text content of the web page - `reddit_scores`: scores of the reddit submissions that mention this web page, as a list of integers ### Data Splits |split|num examples| -------------------------------- |train|17103059| ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? 
[Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information ``` @article{pile, title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling}, author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor}, journal={arXiv preprint arXiv:2101.00027}, year={2020} } ``` ### Contributions [researcher2](https://github.com/researcher2) Wrote much of this code, with inspiration and some straight copying of the scraping code found [here](https://github.com/yet-another-account/openwebtext/).<br/> [sdtblck](https://github.com/sdtblck/) kindly put together the Colab notebook, and performed a chunk of the scraping. <br/> [leogao2](https://github.com/leogao2/) provided overall design guidance, lm_dataformat, and performed another chunk of scraping. <br /> [Colaboratory](https://colab.research.google.com/) VMs helped with about 10% of our overall scraping. <br /> [The Eye](http://the-eye.eu/) host the processed datasets.<br /> [Read The Docs](https://readthedocs.org/) host our documentation.<br /> [@richarddwang](https://github.com/richarddwang) added this dataset to HF/datasets.
5,784
[ [ -0.042449951171875, -0.0526123046875, 0.0018892288208007812, 0.0192413330078125, -0.0196533203125, -0.000015914440155029297, -0.0225830078125, -0.044830322265625, 0.0369873046875, 0.032684326171875, -0.03179931640625, -0.054931640625, -0.034820556640625, 0.0...
0n1xus/pytorrent-standalone
2021-12-02T06:13:15.000Z
[ "region:us" ]
0n1xus
pytorrent-standalone is a subset of the PyTorrent dataset, where only functions that does not depend on external libraries are kept.
@article{Bahrami2021, author = {Bahrami, Mehdi and Shrikanth, N. C. and Ruangwan, Shade and Liu, Lei and Mizobuchi, Yuji and Fukuyori, Masahiro and Chen, Wei-Peng and Munakata, Kazuki and Menzies, Tim}, year = {2021}, journal = {arXiv}, title = {PyTorrent: A Python Library Corpus for Large-scale Language Models} }
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
Check/region_1
2021-09-04T10:57:07.000Z
[ "region:us" ]
Check
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
Nexdata/multi_language_conversation
2023-08-31T03:08:14.000Z
[ "region:us" ]
Nexdata
null
null
5
84
2022-03-02T23:29:22
--- YAML tags: - copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging --- # Dataset Card for multi_language_conversation ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://nexdata.ai/?source=Huggingface - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The dataset contains 12,000 hours of multi-language conversation speech data. It's recorded by native speakers, covering English, French, German, Russian, Spanish, Japanese, Korean, Hindi, Vietnamese etc. The speakers start the conversation around a familar topic, to ensure the smoothness and nature of the conversation. The format is 16kHz, 16bit, uncompressed wav, mono channel. The sentence accuracy is over 95%. 
For more details, please refer to the link: https://nexdata.ai/speechRecognition?source=Huggingface ### Supported Tasks and Leaderboards automatic-speech-recognition, audio-speaker-identification: The dataset can be used to train a model for Automatic Speech Recognition (ASR). ### Languages English, French, German, Russian, Spanish, Japanese, Korean, Hindi, Vietnamese etc. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing ### Citation Information [More Information Needed] ### Contributions
3,438
[ [ -0.035491943359375, -0.058929443359375, -0.004459381103515625, 0.03936767578125, -0.01082611083984375, 0.01197052001953125, -0.0307159423828125, -0.037261962890625, 0.0430908203125, 0.043792724609375, -0.0509033203125, -0.06939697265625, -0.04364013671875, 0...
DeskDown/ALTDataset
2022-02-13T17:03:25.000Z
[ "region:us" ]
DeskDown
null
null
0
84
2022-03-02T23:29:22
# Asian Language Treebank (ALT) This is a **subset** of ALT dataset published by Riza et al. It included following low-resource languages: - fil - vi - id - ms - khm - th - hi - my It also includes ja and zh languages.
221
[ [ -0.0141754150390625, -0.03790283203125, -0.006145477294921875, -0.018402099609375, -0.0029125213623046875, 0.051910400390625, 0.0035152435302734375, -0.0306396484375, 0.023651123046875, 0.042816162109375, -0.053741455078125, -0.033233642578125, -0.00804901123046...
DeskDown/ALTDataset_en-to-fil-vi-id-ms-ja-khm
2022-01-03T22:31:36.000Z
[ "region:us" ]
DeskDown
null
null
0
84
2022-03-02T23:29:22
__Introduction__ The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese). In this dataset you can find parallel corpus of fil, vi, id, ms, ja, khm languages. Dataset is tokenized using mbart50-like tokenizer. (To be added soon) Tokens are padded\truncated at a size of 128.
940
[ [ -0.037078857421875, -0.029541015625, 0.012451171875, 0.0144500732421875, -0.0174713134765625, 0.021453857421875, -0.03564453125, -0.049957275390625, 0.012939453125, 0.04168701171875, -0.0186614990234375, -0.052001953125, -0.017333984375, 0.06512451171875, ...
DiFronzo/Human_Activity_Recognition
2022-02-08T11:18:07.000Z
[ "region:us" ]
DiFronzo
null
null
1
84
2022-03-02T23:29:22
Human Activity Recognition (HAR) using smartphones dataset. Classifying the type of movement amongst five categories: - WALKING, - WALKING_UPSTAIRS, - WALKING_DOWNSTAIRS, - SITTING, - STANDING The experiments have been carried out with a group of 16 volunteers within an age bracket of 19-26 years. Each person performed five activities (WALKING, WALKING_UPSTAIRS, WALKING_DOWNSTAIRS, SITTING, STANDING) wearing a smartphone (Samsung Galaxy S8) in the pucket. Using its embedded accelerometer and gyroscope, we captured 3-axial linear acceleration and 3-axial angular velocity at a constant rate of 50Hz. The experiments have been video-recorded to label the data manually. ```bash 'raw_data/labels.txt': include all the activity labels available for the dataset (1 per row). Column 1: experiment number ID, Column 2: user number ID, Column 3: activity number ID Column 4: Label start point (in number of signal log samples (recorded at 50Hz)) Column 5: Label end point (in number of signal log samples) activity_type: 1 WALKING 2 WALKING_UPSTAIRS 3 WALKING_DOWNSTAIRS 4 SITTING 5 STANDING ``` Repository: [DiFronzo/LSTM-for-Human-Activity-Recognition-classification](https://github.com/DiFronzo/LSTM-for-Human-Activity-Recognition-classification)
1,271
[ [ 0.0036296844482421875, -0.0308685302734375, 0.0070953369140625, -0.004344940185546875, -0.0303955078125, 0.0093231201171875, 0.04150390625, -0.03485107421875, 0.033782958984375, 0.022186279296875, -0.0313720703125, -0.048187255859375, -0.0168609619140625, -0...
DoyyingFace/github-embeddings-doy
2022-01-20T04:19:21.000Z
[ "region:us" ]
DoyyingFace
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
DrishtiSharma/as_opus100_processed
2022-02-09T17:36:03.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
DrishtiSharma/bg_opus100_processed
2022-02-09T07:33:17.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
DrishtiSharma/br_opus100_processed
2022-02-10T01:50:05.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
DrishtiSharma/kk_opus100_processed
2022-02-09T08:23:36.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
DrishtiSharma/sl_opus100_processed
2022-02-09T03:53:47.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
DrishtiSharma/sr_opus100_processed
2022-02-08T17:32:38.000Z
[ "region:us" ]
DrishtiSharma
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Firoj/HumAID
2022-05-18T04:45:03.000Z
[ "region:us" ]
Firoj
The HumAID Twitter dataset consists of several thousands of manually annotated tweets that has been collected during 19 major natural disaster events including earthquakes, hurricanes, wildfires, and floods, which happened from 2016 to 2019 across different parts of the World. The annotations in the provided datasets consists of following humanitarian categories. The dataset consists only english tweets and it is the largest dataset for crisis informatics so far. ** Humanitarian categories ** - Caution and advice - Displaced people and evacuations - Dont know cant judge - Infrastructure and utility damage - Injured or dead people - Missing or found people - Not humanitarian - Other relevant information - Requests or urgent needs - Rescue volunteering or donation effort - Sympathy and support
@inproceedings{humaid2020, Author = {Firoj Alam, Umair Qazi, Muhammad Imran, Ferda Ofli}, booktitle={Proceedings of the Fifteenth International AAAI Conference on Web and Social Media}, series={ICWSM~'21}, Keywords = {Social Media, Crisis Computing, Tweet Text Classification, Disaster Response}, Title = {HumAID: Human-Annotated Disaster Incidents Data from Twitter}, Year = {2021}, publisher={AAAI}, address={Online}, }
1
84
2022-03-02T23:29:22
# Dataset Card for HumAID ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://crisisnlp.qcri.org/humaid_dataset - **Repository:** https://crisisnlp.qcri.org/data/humaid/humaid_data_all.zip - **Paper:** https://ojs.aaai.org/index.php/ICWSM/article/view/18116/17919 <!-- - **Leaderboard:** [Needs More Information] --> <!-- - **Point of Contact:** [Needs More Information] --> ### Dataset Summary The HumAID Twitter dataset consists of several thousands of manually annotated tweets that has been collected during 19 major natural disaster events including earthquakes, hurricanes, wildfires, and floods, which happened from 2016 to 2019 across different parts of the World. The annotations in the provided datasets consists of following humanitarian categories. The dataset consists only english tweets and it is the largest dataset for crisis informatics so far. 
** Humanitarian categories ** - Caution and advice - Displaced people and evacuations - Dont know cant judge - Infrastructure and utility damage - Injured or dead people - Missing or found people - Not humanitarian - Other relevant information - Requests or urgent needs - Rescue volunteering or donation effort - Sympathy and support The resulting annotated dataset consists of 11 labels. ### Supported Tasks and Benchmark The dataset can be used to train a model for multiclass tweet classification for disaster response. The benchmark results can be found in https://ojs.aaai.org/index.php/ICWSM/article/view/18116/17919. Dataset is also released with event-wise and JSON objects for further research. Full set of the dataset can be found in https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/A7NVF7 ### Languages English ## Dataset Structure ### Data Instances ``` { "tweet_text": "@RT_com: URGENT: Death toll in #Ecuador #quake rises to 233 \u2013 President #Correa #1 in #Pakistan", "class_label": "injured_or_dead_people" } ``` ### Data Fields * tweet_text: corresponds to the tweet text. * class_label: corresponds to a label assigned to a given tweet text ### Data Splits * Train * Development * Test ## Dataset Creation <!-- ### Curation Rationale --> ### Source Data #### Initial Data Collection and Normalization Tweets has been collected during several disaster events. ### Annotations #### Annotation process AMT has been used to annotate the dataset. Please check the paper for a more detail. #### Who are the annotators? - crowdsourced <!-- ## Considerations for Using the Data --> <!-- ### Social Impact of Dataset --> <!-- ### Discussion of Biases --> <!-- [Needs More Information] --> <!-- ### Other Known Limitations --> <!-- [Needs More Information] --> ## Additional Information ### Dataset Curators Authors of the paper. 
### Licensing Information - cc-by-nc-4.0 ### Citation Information ``` @inproceedings{humaid2020, Author = {Firoj Alam, Umair Qazi, Muhammad Imran, Ferda Ofli}, booktitle={Proceedings of the Fifteenth International AAAI Conference on Web and Social Media}, series={ICWSM~'21}, Keywords = {Social Media, Crisis Computing, Tweet Text Classification, Disaster Response}, Title = {HumAID: Human-Annotated Disaster Incidents Data from Twitter}, Year = {2021}, publisher={AAAI}, address={Online}, } ```
4,276
[ [ -0.026580810546875, -0.041107177734375, -0.00336456298828125, 0.047760009765625, -0.0188446044921875, 0.038055419921875, -0.005352020263671875, -0.0288543701171875, 0.0189971923828125, 0.01096343994140625, -0.044952392578125, -0.06353759765625, -0.043212890625, ...
GEM/RiSAWOZ
2022-10-24T15:30:01.000Z
[ "task_categories:conversational", "annotations_creators:crowd-sourced", "language_creators:unknown", "multilinguality:unknown", "size_categories:unknown", "source_datasets:original", "language:zh", "license:cc-by-4.0", "dialog-response-generation", "region:us" ]
GEM
RiSAWOZ contains 11.2K human-to-human (H2H) multiturn semantically annotated dialogues, with more than 150K utterances spanning over 12 domains, which is larger than all previous annotated H2H conversational datasets.Both single- and multi-domain dialogues are constructed, accounting for 65% and 35%, respectively.
@inproceedings{quan-etal-2020-risawoz, title = "{R}i{SAWOZ}: A Large-Scale Multi-Domain {W}izard-of-{O}z Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling", author = "Quan, Jun and Zhang, Shian and Cao, Qian and Li, Zizhong and Xiong, Deyi", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.67", pages = "930--940", }
5
84
2022-03-02T23:29:22
--- annotations_creators: - crowd-sourced language_creators: - unknown language: - zh license: - cc-by-4.0 multilinguality: - unknown size_categories: - unknown source_datasets: - original task_categories: - conversational task_ids: [] pretty_name: RiSAWOZ tags: - dialog-response-generation --- # Dataset Card for GEM/RiSAWOZ ## Dataset Description - **Homepage:** https://terryqj0107.github.io/RiSAWOZ_webpage - **Repository:** https://github.com/terryqj0107/RiSAWOZ - **Paper:** https://aclanthology.org/2020.emnlp-main.67 - **Leaderboard:** N/A - **Point of Contact:** Deyi Xiong ### Link to Main Data Card You can find the main data card on the [GEM Website](https://gem-benchmark.com/data_cards/RiSAWOZ). ### Dataset Summary RiSAWOZ is a Chinese dialog dataset. It can be used to study various dialogue tasks, such as Dialogue State Tracking, Dialogue Context-to-Text Generation, Coreference Resolution and Unified Generative Ellipsis and Coreference Resolution. You can load the dataset via: ``` import datasets data = datasets.load_dataset('GEM/RiSAWOZ') ``` The data loader can be found [here](https://huggingface.co/datasets/GEM/RiSAWOZ). #### website [Website](https://terryqj0107.github.io/RiSAWOZ_webpage) #### paper [ACL Anthology](https://aclanthology.org/2020.emnlp-main.67) #### authors Jun Quan (Soochow University, Suzhou, China), Shian Zhang (Soochow University, Suzhou, China), Qian Cao(Soochow University, Suzhou, China), Zizhong Li (Tianjin University, Tianjin, China), Deyi Xiong (Tianjin University, Tianjin, China) ## Dataset Overview ### Where to find the Data and its Documentation #### Webpage <!-- info: What is the webpage for the dataset (if it exists)? --> <!-- scope: telescope --> [Website](https://terryqj0107.github.io/RiSAWOZ_webpage) #### Download <!-- info: What is the link to where the original dataset is hosted? 
--> <!-- scope: telescope --> [Github](https://github.com/terryqj0107/RiSAWOZ) #### Paper <!-- info: What is the link to the paper describing the dataset (open access preferred)? --> <!-- scope: telescope --> [ACL Anthology](https://aclanthology.org/2020.emnlp-main.67) #### BibTex <!-- info: Provide the BibTex-formatted reference for the dataset. Please use the correct published version (ACL anthology, etc.) instead of google scholar created Bibtex. --> <!-- scope: microscope --> ``` @inproceedings{quan-etal-2020-risawoz, title = "{R}i{SAWOZ}: A Large-Scale Multi-Domain {W}izard-of-{O}z Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling", author = "Quan, Jun and Zhang, Shian and Cao, Qian and Li, Zizhong and Xiong, Deyi", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.67", pages = "930--940", } ``` #### Contact Name <!-- quick --> <!-- info: If known, provide the name of at least one person the reader can contact for questions about the dataset. --> <!-- scope: periscope --> Deyi Xiong #### Contact Email <!-- info: If known, provide the email of at least one person the reader can contact for questions about the dataset. --> <!-- scope: periscope --> dyxiong@tju.edu.cn #### Has a Leaderboard? <!-- info: Does the dataset have an active leaderboard? --> <!-- scope: telescope --> no ### Languages and Intended Use #### Multilingual? <!-- quick --> <!-- info: Is the dataset multilingual? --> <!-- scope: telescope --> no #### Covered Dialects <!-- info: What dialects are covered? Are there multiple dialects per language? --> <!-- scope: periscope --> Only Mandarin Chinese is covered in this dataset. #### Covered Languages <!-- quick --> <!-- info: What languages/dialects are covered in the dataset? 
--> <!-- scope: telescope --> `Mandarin Chinese` #### License <!-- quick --> <!-- info: What is the license of the dataset? --> <!-- scope: telescope --> cc-by-4.0: Creative Commons Attribution 4.0 International #### Intended Use <!-- info: What is the intended use of the dataset? --> <!-- scope: microscope --> RiSAWOZ can be used to support the study under various dialogue tasks, such as Natural Language Understanding, Dialogue State Tracking, Dialogue Context-to-Text Generation, Coreference Resolution and Unified Generative Ellipsis and Coreference Resolution. #### Primary Task <!-- info: What primary task does the dataset support? --> <!-- scope: telescope --> Dialog Response Generation #### Communicative Goal <!-- quick --> <!-- info: Provide a short description of the communicative goal of a model trained for this task on this dataset. --> <!-- scope: periscope --> Generate system response given dialogue context across multiple domains. ### Credit #### Curation Organization Type(s) <!-- info: In what kind of organization did the dataset curation happen? --> <!-- scope: telescope --> `academic` #### Curation Organization(s) <!-- info: Name the organization(s). --> <!-- scope: periscope --> Soochow University and Tianjin University #### Dataset Creators <!-- info: Who created the original dataset? List the people involved in collecting the dataset and their affiliation(s). --> <!-- scope: microscope --> Jun Quan (Soochow University, Suzhou, China), Shian Zhang (Soochow University, Suzhou, China), Qian Cao(Soochow University, Suzhou, China), Zizhong Li (Tianjin University, Tianjin, China), Deyi Xiong (Tianjin University, Tianjin, China) #### Funding <!-- info: Who funded the data creation? --> <!-- scope: microscope --> the National Key Research and Development Project #### Who added the Dataset to GEM? <!-- info: Who contributed to the data card and adding the dataset to GEM? 
List the people+affiliations involved in creating this data card and who helped integrate this dataset into GEM. --> <!-- scope: microscope --> Tianhao Shen (Tianjin University, Tianjin, China), Chaobin You (Tianjin University, Tianjin, China), Deyi Xiong (Tianjin University, Tianjin, China) ### Dataset Structure #### Data Fields <!-- info: List and describe the fields present in the dataset. --> <!-- scope: telescope --> - gem_id (string): GEM-RiSAWOZ-{split}-{id} - dialogue_id (string): dialogue ID - goal (string): natural language descriptions of the user goal - domains (list of strings): domains mentioned in current dialogue session - dialogue (list of dicts): dialog turns and corresponding annotations. Each turn includes: - turn_id (int): turn ID - turn_domain (list of strings): domain mentioned in current turn - user_utterance (string): user utterance - system_utterance (string): system utterance - belief_state (dict): dialogue state, including: - inform slot-values (dict): the slots and corresponding values informed until current turn - turn_inform (dict): the slots and corresponding values informed in current turn - turn request (dict): the slots requested in current turn - user_actions (list of lists): user dialogue acts in current turn - user_actions (list of lists): system dialogue acts in current turn - db_results (list of strings): database search results - segmented_user_utterance (string): word segmentation result of user utterance - segmented_system_utterance (string): word segmentation result of system utterance #### Example Instance <!-- info: Provide a JSON formatted example of a typical instance in the dataset. 
--> <!-- scope: periscope --> ``` [ { "dialogue_id": "attraction_goal_4-63###6177", "goal": "attraction_goal_4-63: 你是苏州人,但不怎么出去玩。你朋友来苏州找你,你准备带他逛逛“水乡古镇”,你希望客服给你推荐个消费水平“中等”的地方。然后你要问清楚这地方“是否地铁直达”、“特点”、“门票价格”这些信息。最后,你要感谢客服的帮助,然后说再见。", "domains": [ "旅游景点" ], "dialogue": [ { "turn_id": 0, "turn_domain": [ "旅游景点" ], "user_utterance": "你好,我是苏州人,但是不怎么出去玩,我朋友来苏州找我了,我准备带他逛逛水乡古镇,你能帮我推荐一下吗?", "system_utterance": "推荐您去周庄古镇。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇" }, "turn_inform": { "旅游景点-景点类型": "水乡 古镇" }, "turn request": [] }, "user_actions": [ [ "Inform", "旅游景点", "景点类型", "水乡 古镇" ], [ "Greeting", "旅游景点", "", "" ] ], "system_actions": [ [ "Recommend", "旅游景点", "名称", "周庄 古镇" ] ], "db_results": [ "数据库检索结果:成功匹配个数为8", "{'名称': '周庄古镇', '区域': '昆山', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '100元', '电话号码': '400-8282900', '地址': '苏州市昆山市周庄镇全福路43号', '评分': 4.5, '开放时间': '07:30-18:50', '特点': '小桥流水与人家,双桥水巷摇橹船,还有沈万三的足迹待你寻访'}", "{'名称': '同里古镇', '区域': '吴江', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '100元', '电话号码': '0512-63325728', '地址': '苏州市吴江区同里古镇', '评分': 4.5, '开放时间': '07:30-17:30', '特点': '五湖环抱的江南水乡古镇,走三桥,参观小巧精致的园林退思园。'}", "{'名称': '山塘街', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-65314467', '地址': '苏州市姑苏区古城西北', '评分': 4.5, '开放时间': '08:30-21:00', '特点': '欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。'}", "{'名称': '平江路', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-69163907', '地址': '江苏省苏州市姑苏区白塔东路65号', '评分': 4.5, '开放时间': '全天', '特点': '漫步保存完整的古街区,看小桥流水古宅,逛文艺小店,吃美食。'}", "{'名称': '木渎古镇', '区域': '吴中区', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '便宜', '是否地铁直达': '否', '门票价格': '免费', '电话号码': '0512-66514042', '地址': '苏州市吴中区木渎镇山塘街188号', '评分': 4.4, '开放时间': '08:30-17:00', '特点': '以园林为特色的古镇,游严家花园等古典园林,坐船看小桥流水。'}", "{'名称': '甪直古镇', '区域': '吴中区', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '便宜', '是否地铁直达': '否', '门票价格': '免费', '电话号码': '0512-66191668', '地址': 
'苏州市吴中区甪直镇晓市路21号', '评分': 4.3, '开放时间': '07:30-17:30', '特点': '甪直古镇有2500多年历史,甪直境内水流纵横,桥梁密布,有五湖之厅、六泽之冲之称。'}", "{'名称': '千灯古镇', '区域': '昆山', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '便宜', '是否地铁直达': '否', '门票价格': '免费', '电话号码': '0512-57472155', '地址': '苏州市昆山市千灯古镇尚书路1号', '评分': 4.3, '开放时间': '08:00-17:00', '特点': '千灯古镇,距今已有2500多年的历史,古镇白墙黑瓦,昆韵盎然。'}", "{'名称': '锦溪古镇', '区域': '昆山', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '中等', '是否地铁直达': '否', '门票价格': '65元', '电话号码': '0512-57224669', '地址': '苏州市昆山市锦溪镇邵甸港路18号', '评分': 4.4, '开放时间': '08:00-17:00', '特点': '锦溪古镇位于昆山南郊的淀山湖畔,是一座有千年历史的江南水乡。'}" ], "segmented_user_utterance": "你好 , 我 是 苏州人 , 但是 不怎么 出去玩 , 我 朋友 来 苏州 找 我 了 , 我 准备 带 他 逛逛 水乡 古镇 , 你 能 帮 我 推荐 一下 吗 ?", "segmented_system_utterance": "推荐 您 去 周庄 古镇 。" }, { "turn_id": 1, "turn_domain": [ "旅游景点" ], "user_utterance": "我希望这个地方消费水平适中的,这个是中等的吗?", "system_utterance": "这个不是,我又重新帮您找了一家山塘街。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇", "旅游景点-消费": "中等" }, "turn_inform": { "旅游景点-消费": "中等" }, "turn request": [] }, "user_actions": [ [ "Inform", "旅游景点", "消费", "中等" ] ], "system_actions": [ [ "Recommend", "旅游景点", "名称", "山塘 街" ] ], "db_results": [ "数据库检索结果:成功匹配个数为3", "{'名称': '山塘街', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-65314467', '地址': '苏州市姑苏区古城西北', '评分': 4.5, '开放时间': '08:30-21:00', '特点': '欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。'}", "{'名称': '平江路', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-69163907', '地址': '江苏省苏州市姑苏区白塔东路65号', '评分': 4.5, '开放时间': '全天', '特点': '漫步保存完整的古街区,看小桥流水古宅,逛文艺小店,吃美食。'}", "{'名称': '锦溪古镇', '区域': '昆山', '景点类型': '水乡古镇', '最适合人群': '朋友出游', '消费': '中等', '是否地铁直达': '否', '门票价格': '65元', '电话号码': '0512-57224669', '地址': '苏州市昆山市锦溪镇邵甸港路18号', '评分': 4.4, '开放时间': '08:00-17:00', '特点': '锦溪古镇位于昆山南郊的淀山湖畔,是一座有千年历史的江南水乡。'}" ], "segmented_user_utterance": "我 希望 这个 地方 消费水平 适中 的 , 这个 是 中等 的 吗 ?", "segmented_system_utterance": "这个 不是 , 我 又 重新 帮 您 找 了 一家 山塘 街 。" }, { "turn_id": 2, "turn_domain": [ 
"旅游景点" ], "user_utterance": "听这地方应该挺好玩,他们家特点是什么?", "system_utterance": "特点是欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇", "旅游景点-消费": "中等", "旅游景点-名称": "山塘 街" }, "turn_inform": { "旅游景点-名称": "山塘 街" }, "turn request": [ "特点" ] }, "user_actions": [ [ "Inform", "旅游景点", "名称", "山塘 街" ], [ "Request", "旅游景点", "特点", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "特点", "欣赏 苏州 老城 风貌 , 参观 会馆 等 古迹 , 乘 游船 , 拍摄 美丽 的 夜景" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '山塘街', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-65314467', '地址': '苏州市姑苏区古城西北', '评分': 4.5, '开放时间': '08:30-21:00', '特点': '欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。'}" ], "segmented_user_utterance": "听 这 地方 应该 挺好玩 , 他们 家 特点 是 什么 ?", "segmented_system_utterance": "特点 是 欣赏 苏州 老城 风貌 , 参观 会馆 等 古迹 , 乘 游船 , 拍摄 美丽 的 夜景 。" }, { "turn_id": 3, "turn_domain": [ "旅游景点" ], "user_utterance": "听起来很不错,他们家门票是多少?", "system_utterance": "门票免费。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇", "旅游景点-消费": "中等", "旅游景点-名称": "山塘 街" }, "turn_inform": {}, "turn request": [ "门票价格" ] }, "user_actions": [ [ "Request", "旅游景点", "门票价格", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "门票价格", "免费" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '山塘街', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-65314467', '地址': '苏州市姑苏区古城西北', '评分': 4.5, '开放时间': '08:30-21:00', '特点': '欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。'}" ], "segmented_user_utterance": "听 起来 很 不错 , 他们 家 门票 是 多少 ?", "segmented_system_utterance": "门票 免费 。" }, { "turn_id": 4, "turn_domain": [ "旅游景点" ], "user_utterance": "还行,那他们家是否有地铁直达呢?", "system_utterance": "嗯嗯,可以地铁直达。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇", "旅游景点-消费": "中等", "旅游景点-名称": "山塘 街" }, "turn_inform": {}, "turn request": [ "是否地铁直达" ] }, "user_actions": [ [ "Request", "旅游景点", "是否地铁直达", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "是否地铁直达", "是" ] ], 
"db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '山塘街', '区域': '姑苏区', '景点类型': '水乡古镇', '最适合人群': '情侣约会', '消费': '中等', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '0512-65314467', '地址': '苏州市姑苏区古城西北', '评分': 4.5, '开放时间': '08:30-21:00', '特点': '欣赏苏州老城风貌,参观会馆等古迹,乘游船,拍摄美丽的夜景。'}" ], "segmented_user_utterance": "还 行 , 那 他们 家 是否 有 地铁 直达 呢 ?", "segmented_system_utterance": "嗯 嗯 , 可以 地铁 直达 。" }, { "turn_id": 5, "turn_domain": [ "通用" ], "user_utterance": "谢谢你的帮助,再见!", "system_utterance": "再见。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "水乡 古镇", "旅游景点-消费": "中等", "旅游景点-名称": "山塘 街" }, "turn_inform": {}, "turn request": [] }, "user_actions": [ [ "Bye", "通用", "", "" ] ], "system_actions": [ [ "Bye", "通用", "", "" ] ], "db_results": [], "segmented_user_utterance": "谢谢 你 的 帮助 , 再见 !", "segmented_system_utterance": "再见 。" } ] }, { "dialogue_id": "attraction_goal_3-33_v2###7307", "goal": "attraction_goal_3-33: 你和朋友一起来苏州出差,顺便想去“苏州乐园森林水世界”逛一逛。因此,你要询问客服这个地方“是否地铁直达”、“评分”、“特点”这些信息。最后,你向客服表示感谢,结束对话。", "domains": [ "旅游景点" ], "dialogue": [ { "turn_id": 0, "turn_domain": [ "旅游景点" ], "user_utterance": "你好,我和朋友一起来苏州出差,顺便想去苏州乐园森林水世界玩玩,能否帮我看看这个地方是否有地铁直达呢?", "system_utterance": "稍等,很抱歉,没有地铁能直接到这个景点呢。", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn request": [ "是否地铁直达" ] }, "user_actions": [ [ "Inform", "旅游景点", "名称", "苏州 乐园 森林 水 世界" ], [ "Request", "旅游景点", "是否地铁直达", "" ], [ "Greeting", "旅游景点", "", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "是否地铁直达", "否" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '苏州乐园森林水世界', '区域': '高新区', '景点类型': '游乐场', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '198元', '电话号码': '0512-68717107', '地址': '苏州市高新区阳山东路以西,山神湾路以东', '评分': 4.4, '开放时间': '09:00-17:00', '特点': '各项水上娱乐项目皆具森林特色,被赋予森林主题,让人身处浩瀚森林又能享受到海洋般的舒爽水游乐。'}" ], "segmented_user_utterance": "你好 , 我 和 朋友 一 起来 苏州 出差 , 顺便 想 去 苏州 乐园 森林 水 世界 玩玩 , 能否 帮 我 看看 这个 地方 是否 有 地铁 直达 呢 ?", "segmented_system_utterance": "稍 等 , 很 抱歉 , 没有 地铁 能 直接 到 这个 景点 呢 。" 
}, { "turn_id": 1, "turn_domain": [ "旅游景点" ], "user_utterance": "你再帮我看看景点的评分好吗?", "system_utterance": "这个景点的评分是4.4分。", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": {}, "turn request": [ "评分" ] }, "user_actions": [ [ "Request", "旅游景点", "评分", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "评分", "4.4" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '苏州乐园森林水世界', '区域': '高新区', '景点类型': '游乐场', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '198元', '电话号码': '0512-68717107', '地址': '苏州市高新区阳山东路以西,山神湾路以东', '评分': 4.4, '开放时间': '09:00-17:00', '特点': '各项水上娱乐项目皆具森林特色,被赋予森林主题,让人身处浩瀚森林又能享受到海洋般的舒爽水游乐。'}" ], "segmented_user_utterance": "你 再 帮 我 看看 景点 的 评分 好 吗 ?", "segmented_system_utterance": "这个 景点 的 评分 是 4.4 分 。" }, { "turn_id": 2, "turn_domain": [ "旅游景点" ], "user_utterance": "这个景点有啥特点呢?", "system_utterance": "这个景点的特点是各项水上娱乐项目皆具森林特色,被赋予森林主题,让人身处浩瀚森林又能享受到海洋般的舒爽水游乐。", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": {}, "turn request": [ "特点" ] }, "user_actions": [ [ "Request", "旅游景点", "特点", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "特点", "各项 水上 娱乐 项目 皆 具 森林 特色 , 被 赋予 森林 主题 , 让 人 身处 浩瀚 森林 又 能 享受 到 海洋 般的 舒爽 水 游乐 。" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '苏州乐园森林水世界', '区域': '高新区', '景点类型': '游乐场', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '198元', '电话号码': '0512-68717107', '地址': '苏州市高新区阳山东路以西,山神湾路以东', '评分': 4.4, '开放时间': '09:00-17:00', '特点': '各项水上娱乐项目皆具森林特色,被赋予森林主题,让人身处浩瀚森林又能享受到海洋般的舒爽水游乐。'}" ], "segmented_user_utterance": "这个 景点 有 啥 特点 呢 ?", "segmented_system_utterance": "这个 景点 的 特点 是 各项 水上 娱乐 项目 皆 具 森林 特色 , 被 赋予 森林 主题 , 让 人 身处 浩瀚 森林 又 能 享受 到 海洋 般的 舒爽 水 游乐 。" }, { "turn_id": 3, "turn_domain": [ "通用" ], "user_utterance": "太好了,我觉得我会很喜欢这个地方,哈哈。", "system_utterance": "这个景点挺不错的,就是没有地铁需要您乘坐其它交通工具了。", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": {}, "turn request": [] }, "user_actions": [ [ "General", "通用", "", "" ] ], "system_actions": [ [ "General", 
"通用", "", "" ] ], "db_results": [], "segmented_user_utterance": "太好了 , 我 觉得 我会 很 喜欢 这个 地方 , 哈哈 。", "segmented_system_utterance": "这个 景点 挺不错 的 , 就是 没有 地铁 需要 您 乘坐 其它 交通工具 了 。" }, { "turn_id": 4, "turn_domain": [ "通用" ], "user_utterance": "我打出租车去吧,就是多花点车费,没事。", "system_utterance": "请问您还有其他问题吗?", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": {}, "turn request": [] }, "user_actions": [ [ "General", "通用", "", "" ] ], "system_actions": [ [ "General", "通用", "", "" ] ], "db_results": [], "segmented_user_utterance": "我 打 出租车 去 吧 , 就是 多花点 车费 , 没事 。", "segmented_system_utterance": "请问 您 还有 其他 问题 吗 ?" }, { "turn_id": 5, "turn_domain": [ "通用" ], "user_utterance": "没有了。谢谢你,再见。", "system_utterance": "再见。", "belief_state": { "inform slot-values": { "旅游景点-名称": "苏州 乐园 森林 水 世界" }, "turn_inform": {}, "turn request": [] }, "user_actions": [ [ "Bye", "通用", "", "" ] ], "system_actions": [ [ "Bye", "通用", "", "" ] ], "db_results": [], "segmented_user_utterance": "没有 了 。 谢谢 你 , 再见 。", "segmented_system_utterance": "再见 。" } ] } ] ``` #### Data Splits <!-- info: Describe and name the splits in the dataset if there are more than one. --> <!-- scope: periscope --> - Train: - dialogues: 10000 - turns: 134580 - Avg. turns: 13.5 - Valid: - dialogues: 600 - turns: 8116 - Avg. turns: 13.5 - Test: - dialogues: 600 - turn: 9286 - Avg. turns: 15.5 #### <!-- info: What does an outlier of the dataset in terms of length/perplexity/embedding look like? --> <!-- scope: microscope --> the most complex example (i.e. 
with the most domains): ``` { "dialogue_id": "attraction_restaurant_hotel_goal_2-69_v2###8355", "goal": "attraction_restaurant_hotel_goal_2-69: 公司派你去苏州出差,你准备抽空旅游一下。第一,你想去一个“山水景区”,消费水平最好是“偏贵”的,你需要问清楚这地方“开放时间”、“门票价格”这些信息。第二,你要找个地方住,让客服给你推荐一家价位“中等”的“商务出行”酒店,并且询问这家酒店的“停车场”、“电话号码”这些信息。第三,你还想拔草美食,问客服有没有价位“中等”的“江浙菜”餐厅推荐,然后了解这家餐厅的“是否地铁直达”、“人均消费”这些信息。最后你要感谢客服的帮助,并说再见。", "domains": [ "旅游景点", "餐厅", "酒店" ], "dialogue": [ { "turn_id": 0, "turn_domain": [ "旅游景点" ], "user_utterance": "你好,来苏州出差,准备抽空旅游一下。我想去山水景区,消费水平偏贵点,帮忙推荐一个。", "system_utterance": "你好,向您推荐金鸡湖景区,感受一下苏州现代化的一面。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵" }, "turn_inform": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵" }, "turn request": [] }, "user_actions": [ [ "Inform", "旅游景点", "景点类型", "山水 景区" ], [ "Inform", "旅游景点", "消费", "偏贵" ], [ "Greeting", "旅游景点", "", "" ], [ "General", "旅游景点", "", "" ] ], "system_actions": [ [ "Greeting", "通用", "", "" ], [ "General", "通用", "", "" ], [ "Recommend", "旅游景点", "名称", "金鸡湖 景区" ] ], "db_results": [ "数据库检索结果:成功匹配个数为2", "{'名称': '金鸡湖景区', '区域': '工业园区', '景点类型': '山水景区', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '400-7558558', '地址': '苏州市工业园区星港街158号', '评分': 4.5, '开放时间': '全天', '特点': '看东方之门等高楼,坐摩天轮,乘船夜游,感受苏州现代化的一面。'}", "{'名称': '沙家浜风景区', '区域': '常熟', '景点类型': '山水景区', '最适合人群': '家庭亲子', '消费': '偏贵', '是否地铁直达': '否', '门票价格': '110元', '电话号码': '0512-52500000', '地址': '苏州市常熟市沙家浜镇', '评分': 4.5, '开放时间': '08:00-16:30', '特点': '京剧样板戏《沙家浜》的发源地,访剧中场景,坐船深入芦苇荡。'}" ], "segmented_user_utterance": "你好 , 来 苏州 出差 , 准备 抽空 旅游 一下 。 我 想 去 山水 景区 , 消费水平 偏 贵点 , 帮忙 推荐 一个 。", "segmented_system_utterance": "你好 , 向 您 推荐 金鸡湖 景区 , 感受一下 苏州 现代化 的 一面 。" }, { "turn_id": 1, "turn_domain": [ "旅游景点" ], "user_utterance": "这里开放时间?需要安排一下。", "system_utterance": "景区是全天开放的,可以尽情游览!", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区" }, "turn_inform": { "旅游景点-名称": "金鸡湖 景区" }, "turn request": [ "开放时间" ] }, "user_actions": [ [ "Inform", "旅游景点", 
"名称", "金鸡湖 景区" ], [ "Request", "旅游景点", "开放时间", "" ], [ "General", "旅游景点", "", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "开放时间", "全天" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '金鸡湖景区', '区域': '工业园区', '景点类型': '山水景区', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '400-7558558', '地址': '苏州市工业园区星港街158号', '评分': 4.5, '开放时间': '全天', '特点': '看东方之门等高楼,坐摩天轮,乘船夜游,感受苏州现代化的一面。'}" ], "segmented_user_utterance": "这里 开放 时间 ? 需要 安排 一下 。", "segmented_system_utterance": "景区 是 全天 开放 的 , 可以 尽情 游览 !" }, { "turn_id": 2, "turn_domain": [ "旅游景点" ], "user_utterance": "门票多少钱?", "system_utterance": "这里是免费景区。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区" }, "turn_inform": {}, "turn request": [ "门票价格" ] }, "user_actions": [ [ "Request", "旅游景点", "门票价格", "" ] ], "system_actions": [ [ "Inform", "旅游景点", "门票价格", "免费" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '金鸡湖景区', '区域': '工业园区', '景点类型': '山水景区', '最适合人群': '情侣约会', '消费': '偏贵', '是否地铁直达': '是', '门票价格': '免费', '电话号码': '400-7558558', '地址': '苏州市工业园区星港街158号', '评分': 4.5, '开放时间': '全天', '特点': '看东方之门等高楼,坐摩天轮,乘船夜游,感受苏州现代化的一面。'}" ], "segmented_user_utterance": "门票 多少 钱 ?", "segmented_system_utterance": "这里 是 免费 景区 。" }, { "turn_id": 3, "turn_domain": [ "酒店" ], "user_utterance": "哦,好的。还有酒店,帮我推荐个价位中等的商务酒店!", "system_utterance": "推荐苏州金鸡湖李公堤瑞贝庭公寓酒店,在金鸡湖景区附近。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行" }, "turn_inform": { "酒店-价位": "中等", "酒店-酒店类型": "商务 出行" }, "turn request": [] }, "user_actions": [ [ "Inform", "酒店", "价位", "中等" ], [ "Inform", "酒店", "酒店类型", "商务 出行" ], [ "General", "酒店", "", "" ] ], "system_actions": [ [ "Recommend", "酒店", "名称", "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" ], [ "General", "通用", "", "" ] ], "db_results": [ "数据库检索结果:成功匹配个数为16", "{'名称': '苏州慢享主题酒店', '区域': '姑苏区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '收费', '房费': '266元', '地址': '苏州姑苏区景德路26-64号', '电话号码': 
'0512-67570999', '评分': 4.7}", "{'名称': '苏州慢享主题酒店', '区域': '姑苏区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '收费', '房费': '278元', '地址': '苏州姑苏区景德路26-64号', '电话号码': '0512-67570999', '评分': 4.7}", "{'名称': '美锦酒店', '区域': '高新区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '308元', '地址': '苏州高新区滨河路999号花样年喜年生活广场5栋1层', '电话号码': '0512-66053331', '评分': 4.8}", "{'名称': '美锦酒店', '区域': '高新区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '349元', '地址': '苏州高新区滨河路999号花样年喜年生活广场5栋1层', '电话号码': '0512-66053331', '评分': 4.8}", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}", "{'名称': '苏州途乐酒店公寓', '区域': '工业园区', '星级': '2', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '收费', '房费': '486元', '地址': '苏州工业园区苏州丰隆城市中心T1楼', '电话号码': '151-5149-7911', '评分': 4.6}", "{'名称': '苏州途乐酒店公寓', '区域': '工业园区', '星级': '2', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '收费', '房费': '486元', '地址': '苏州工业园区苏州丰隆城市中心T1楼', '电话号码': '151-5149-7911', '评分': 4.6}", "{'名称': '万悦酒店', '区域': '吴中区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '346元', '地址': '苏州吴中区金山路47-2号', '电话号码': '0512-83808380', '评分': 4.5}", "{'名称': '万悦酒店', '区域': '吴中区', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '338元', '地址': '苏州吴中区金山路47-2号', '电话号码': '0512-83808380', '评分': 4.5}", "{'名称': '周庄多瓦台临河客栈', '区域': '昆山', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '收费', '房费': '279元', '地址': '昆山周庄镇东浜村75号', '电话号码': '181-3619-1632', '评分': 4.8}", "{'名称': '周庄多瓦台临河客栈', '区域': '昆山', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '收费', '房费': '279元', '地址': '昆山周庄镇东浜村75号', '电话号码': '181-3619-1632', '评分': 4.8}", "{'名称': '常熟长江路亚朵酒店', 
'区域': '常熟', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '312元', '地址': '常熟长江路268号', '电话号码': '0512-51661666', '评分': 4.8}", "{'名称': '常熟长江路亚朵酒店', '区域': '常熟', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '312元', '地址': '常熟长江路268号', '电话号码': '0512-51661666', '评分': 4.8}", "{'名称': '维也纳酒店', '区域': '常熟', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '322元', '地址': '常熟碧溪镇金港路8号', '电话号码': '0512-52018188', '评分': 4.8}", "{'名称': '维也纳酒店', '区域': '常熟', '星级': '3', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '340元', '地址': '常熟碧溪镇金港路8号', '电话号码': '0512-52018188', '评分': 4.8}" ], "segmented_user_utterance": "哦 , 好 的 。 还有 酒店 , 帮 我 推荐 个 价位 中等 的 商务酒店 !", "segmented_system_utterance": "推荐 苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店 , 在 金鸡湖 景区 附近 。" }, { "turn_id": 4, "turn_domain": [ "酒店" ], "user_utterance": "你想得真周到!这家酒店有停车场吗?", "system_utterance": "酒店配备有一个免费的停车场。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": { "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn request": [ "停车场" ] }, "user_actions": [ [ "Inform", "酒店", "名称", "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" ], [ "Request", "酒店", "停车场", "" ] ], "system_actions": [ [ "Inform", "酒店", "停车场", "免费" ] ], "db_results": [ "数据库检索结果:成功匹配个数为2", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}" ], "segmented_user_utterance": "你 想 得 真 周到 ! 
这家 酒店 有 停车场 吗 ?", "segmented_system_utterance": "酒店 配备 有 一个 免费 的 停车场 。" }, { "turn_id": 5, "turn_domain": [ "酒店" ], "user_utterance": "酒店的电话告诉我一下。", "system_utterance": "电话号码:0512-69995666。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": {}, "turn request": [ "电话号码" ] }, "user_actions": [ [ "Request", "酒店", "电话号码", "" ] ], "system_actions": [ [ "Inform", "酒店", "电话号码", "0512 - 69995666" ] ], "db_results": [ "数据库检索结果:成功匹配个数为2", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '大床房', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}", "{'名称': '苏州金鸡湖李公堤瑞贝庭公寓酒店', '区域': '工业园区', '星级': '4', '价位': '中等', '酒店类型': '商务出行', '房型': '标准间', '停车场': '免费', '房费': '438元', '地址': '苏州工业园区李公堤三期E区商业街9幢', '电话号码': '0512-69995666', '评分': 4.6}" ], "segmented_user_utterance": "酒店 的 电话 告诉 我 一下 。", "segmented_system_utterance": "电话号码 : 0512 - 69995666 。" }, { "turn_id": 6, "turn_domain": [ "餐厅" ], "user_utterance": "嗯,我还想拔草美食。有没有价位中等的江浙菜餐厅推荐一个。", "system_utterance": "推荐老枫庭。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "餐厅-菜系": "江浙 菜", "餐厅-价位": "中等", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": { "餐厅-菜系": "江浙 菜", "餐厅-价位": "中等" }, "turn request": [] }, "user_actions": [ [ "Inform", "餐厅", "菜系", "江浙 菜" ], [ "Inform", "餐厅", "价位", "中等" ], [ "General", "餐厅", "", "" ] ], "system_actions": [ [ "Recommend", "餐厅", "名称", "老枫庭" ] ], "db_results": [ "数据库检索结果:成功匹配个数为3", "{'名称': '老枫庭', '区域': '吴中区', '菜系': '江浙菜', '价位': '中等', '是否地铁直达': '否', '人均消费': '94元', '地址': '木渎灵天路55号芭提雅休闲娱乐广场5号楼', '电话号码': '0512-66277557', '评分': 4.6, '营业时间': '周一至周日 11:00-13:30 17:00-20:30', '推荐菜': '松鼠桂鱼'}", "{'名称': '同里老家', '区域': '吴江', '菜系': '江浙菜', '价位': '中等', '是否地铁直达': '否', '人均消费': '87元', '地址': '同里镇环湖西路318号', '电话号码': '0512-63339777', 
'评分': 4.5, '营业时间': '周一至周日 10:00-22:00', '推荐菜': '太湖三白'}", "{'名称': '周庄花间堂桔梗餐厅', '区域': '昆山', '菜系': '江浙菜', '价位': '中等', '是否地铁直达': '否', '人均消费': '98元', '地址': '周庄镇中市街110号景点迷楼北10米', '电话号码': '138-1289-9961', '评分': 4.5, '营业时间': '周一至周日 07:00-20:30', '推荐菜': '银鱼炒蛋'}" ], "segmented_user_utterance": "嗯 , 我 还 想 拔草 美食 。 有没有 价位 中等 的 江浙 菜 餐厅 推荐 一个 。", "segmented_system_utterance": "推荐 老枫庭 。" }, { "turn_id": 7, "turn_domain": [ "餐厅" ], "user_utterance": "这家餐厅地铁直达吗?", "system_utterance": "抱歉,餐厅没有地铁直达!", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "餐厅-菜系": "江浙 菜", "餐厅-价位": "中等", "餐厅-名称": "老枫庭", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": { "餐厅-名称": "老枫庭" }, "turn request": [ "是否地铁直达" ] }, "user_actions": [ [ "Inform", "餐厅", "名称", "老枫庭" ], [ "Request", "餐厅", "是否地铁直达", "" ] ], "system_actions": [ [ "Inform", "餐厅", "是否地铁直达", "否" ], [ "General", "通用", "", "" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '老枫庭', '区域': '吴中区', '菜系': '江浙菜', '价位': '中等', '是否地铁直达': '否', '人均消费': '94元', '地址': '木渎灵天路55号芭提雅休闲娱乐广场5号楼', '电话号码': '0512-66277557', '评分': 4.6, '营业时间': '周一至周日 11:00-13:30 17:00-20:30', '推荐菜': '松鼠桂鱼'}" ], "segmented_user_utterance": "这家 餐厅 地铁 直达 吗 ?", "segmented_system_utterance": "抱歉 , 餐厅 没有 地铁 直达 !" 
}, { "turn_id": 8, "turn_domain": [ "餐厅" ], "user_utterance": "人均消费呢?", "system_utterance": "老枫庭人均消费在94元左右。", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "餐厅-菜系": "江浙 菜", "餐厅-价位": "中等", "餐厅-名称": "老枫庭", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": {}, "turn request": [ "人均消费" ] }, "user_actions": [ [ "Request", "餐厅", "人均消费", "" ] ], "system_actions": [ [ "Inform", "餐厅", "人均消费", "老枫庭" ] ], "db_results": [ "数据库检索结果:成功匹配个数为1", "{'名称': '老枫庭', '区域': '吴中区', '菜系': '江浙菜', '价位': '中等', '是否地铁直达': '否', '人均消费': '94元', '地址': '木渎灵天路55号芭提雅休闲娱乐广场5号楼', '电话号码': '0512-66277557', '评分': 4.6, '营业时间': '周一至周日 11:00-13:30 17:00-20:30', '推荐菜': '松鼠桂鱼'}" ], "segmented_user_utterance": "人均 消费 呢 ?", "segmented_system_utterance": "老枫庭 人均 消费 在 94 元 左右 。" }, { "turn_id": 9, "turn_domain": [ "通用" ], "user_utterance": "好,所有问题都解决了,谢谢你的帮助!再见!", "system_utterance": "不客气!这是我应该做的!再见!", "belief_state": { "inform slot-values": { "旅游景点-景点类型": "山水 景区", "旅游景点-消费": "偏贵", "旅游景点-名称": "金鸡湖 景区", "餐厅-菜系": "江浙 菜", "餐厅-价位": "中等", "餐厅-名称": "老枫庭", "酒店-价位": "中等", "酒店-酒店类型": "商务 出行", "酒店-名称": "苏州 金鸡湖 李公堤 瑞贝庭 公寓 酒店" }, "turn_inform": {}, "turn request": [] }, "user_actions": [ [ "Bye", "通用", "", "" ], [ "General", "通用", "", "" ] ], "system_actions": [ [ "Bye", "通用", "", "" ], [ "General", "通用", "", "" ] ], "db_results": [], "segmented_user_utterance": "好 , 所有 问题 都 解决 了 , 谢谢 你 的 帮助 ! 再见 !", "segmented_system_utterance": "不 客气 ! 这 是 我 应该 做 的 ! 再见 !" } ] } ``` ## Dataset in GEM ### Rationale for Inclusion in GEM #### Why is the Dataset in GEM? <!-- info: What does this dataset contribute toward better generation evaluation and why is it part of GEM? --> <!-- scope: microscope --> RiSAWOZ is the first large-scale multi-domain Chinese Wizard-of-Oz dataset with rich semantic annotations. #### Similar Datasets <!-- info: Do other datasets for the high level task exist? 
--> <!-- scope: telescope --> yes #### Unique Language Coverage <!-- info: Does this dataset cover other languages than other datasets for the same task? --> <!-- scope: periscope --> no #### Difference from other GEM datasets <!-- info: What else sets this dataset apart from other similar datasets in GEM? --> <!-- scope: microscope --> The corpus contains rich semantic annotations, such as ellipsis and coreference, in addition to traditional dialogue annotations (dialogue states, dialogue acts, etc.), which can be used in various tasks in dialogue system. #### Ability that the Dataset measures <!-- info: What aspect of model ability can be measured with this dataset? --> <!-- scope: periscope --> Natural Language Understanding, Dialogue State Tracking, Dialogue Context-to-Text Generation, Coreference Resolution, Unified Generative Ellipsis and Coreference Resolution ### GEM-Specific Curation #### Modificatied for GEM? <!-- info: Has the GEM version of the dataset been modified in any way (data, processing, splits) from the original curated data? --> <!-- scope: telescope --> no #### Additional Splits? <!-- info: Does GEM provide additional splits to the dataset? --> <!-- scope: telescope --> no ### Getting Started with the Task #### Pointers to Resources <!-- info: Getting started with in-depth research on the task. Add relevant pointers to resources that researchers can consult when they want to get started digging deeper into the task. --> <!-- scope: microscope --> [Website](https://terryqj0107.github.io/RiSAWOZ_webpage) #### Technical Terms <!-- info: Technical terms used in this card and the dataset and their definitions --> <!-- scope: microscope --> - In task-oriented dialogue system, the Natural Language Understanding (NLU) module aims to convert the user utterance into the representation that computer can understand, which includes intent and dialogue act (slot & value) detection. 
- Dialogue State Tracking (DST) is a core component in task-oriented dialogue systems, which extracts dialogue states (user goals) embedded in dialogue context. It has progressed toward open-vocabulary or generation-based DST where state-of-the-art models can generate dialogue states from dialogue context directly. - Context-to-Text Generation: encoding dialogue context to decode system response. - Coreference Resolution: predict coreference clusters where all mentions are referring to the same entity for each dialogue. - Unified Generative Ellipsis and Coreference Resolution: generating omitted or referred expressions from the dialogue context. ## Previous Results ### Previous Results #### Measured Model Abilities <!-- info: What aspect of model ability can be measured with this dataset? --> <!-- scope: telescope --> Natural Language Understanding, Dialogue State Tracking, Dialogue Context-to-Text Generation, Coreference Resolution, Unified Generative Ellipsis and Coreference Resolution #### Metrics <!-- info: What metrics are typically used for this task? --> <!-- scope: periscope --> `Other: Other Metrics` #### Other Metrics <!-- info: Definitions of other metrics --> <!-- scope: periscope --> - Natural Language Understanding: - F1 score: F1 score of user intent. - Dialogue State Tracking: - Joint Accuracy: accuracy of turn-level dialogue states. - Dialogue Context-to-Text Generation: - inform rate: measures the percentage that the output contains the appropriate entity the user asks for. - success rate: estimates the proportion that all the requested attributes have been answered. - BLEU: the BLEU score of generated system response. - Combined Score: (inform + success) ∗ 0.5 + BLEU as an overall quality. - Coreference Resolution: - MUC F1 Score: a link-based metric. Mentions in the same entity/cluster are considered “linked”. MUC penalizes the missing links and incorrect links, each with the same weight. 
- B3 F1 Score: a mention-based metric. The evaluation score depends on the fraction of the correct mentions included in the response entities (i.e. entities created by the system). - CEAFφ4 F1 Score: a metric which assumes each key entity should only be mapped to one response entity, and vice versa. It aligns the key entities (clusters) with the response entities in the best way, and computes scores from that alignment. - Average F1 Score: an average F1 score of the above three metrics. - Unified Generative Ellipsis and Coreference Resolution: - Exact Match Rate: measures whether the generated utterances exactly match the ground-truth utterances. - BLEU: the BLEU score of generated utterances - Resolution F1: comparing machine-generated words with ground-truth words only from the ellipsis/coreference part of user utterances. #### Proposed Evaluation <!-- info: List and describe the purpose of the metrics and evaluation methodology (including human evaluation) that the dataset creators used when introducing this task. --> <!-- scope: microscope --> see "Definitions of other metrics" #### Previous results available? <!-- info: Are previous results available? --> <!-- scope: telescope --> yes #### Other Evaluation Approaches <!-- info: What evaluation approaches have others used? --> <!-- scope: periscope --> same as our dataset #### Relevant Previous Results <!-- info: What are the most relevant previous results for this task/dataset? --> <!-- scope: microscope --> Joint Accuracy, Inform Rate, Success Rate, BLEU Score and Combined Score on MultiWOZ and CrossWOZ dataset. ## Dataset Curation ### Original Curation #### Original Curation Rationale <!-- info: Original curation rationale --> <!-- scope: telescope --> Gather human-to-human dialog in Chinese. #### Communicative Goal <!-- info: What was the communicative goal? --> <!-- scope: periscope --> Generate system response given dialogue context across multiple domains. 
#### Sourced from Different Sources <!-- info: Is the dataset aggregated from different data sources? --> <!-- scope: telescope --> no ### Language Data #### How was Language Data Obtained? <!-- info: How was the language data obtained? --> <!-- scope: telescope --> `Crowdsourced` #### Where was it crowdsourced? <!-- info: If crowdsourced, where from? --> <!-- scope: periscope --> `Other crowdworker platform` #### Topics Covered <!-- info: Does the language in the dataset focus on specific topics? How would you describe them? --> <!-- scope: periscope --> domains: Attraction, Restaurant, Hotel, Flight, Train, Weather, Movie, TV, Computer, Car, Hospital, Courses #### Data Validation <!-- info: Was the text validated by a different worker or a data curator? --> <!-- scope: telescope --> validated by data curator #### Was Data Filtered? <!-- info: Were text instances selected or filtered? --> <!-- scope: telescope --> hybrid #### Filter Criteria <!-- info: What were the selection criteria? --> <!-- scope: microscope --> Rule-based and manual selection criteria ### Structured Annotations #### Additional Annotations? <!-- quick --> <!-- info: Does the dataset have additional annotations for each instance? --> <!-- scope: telescope --> crowd-sourced #### Number of Raters <!-- info: What is the number of raters --> <!-- scope: telescope --> 51<n<100 #### Rater Qualifications <!-- info: Describe the qualifications required of an annotator. --> <!-- scope: periscope --> Chinese native speaker #### Raters per Training Example <!-- info: How many annotators saw each training example? --> <!-- scope: periscope --> 3 #### Raters per Test Example <!-- info: How many annotators saw each test example? --> <!-- scope: periscope --> 3 #### Annotation Service? <!-- info: Was an annotation service used? 
--> <!-- scope: telescope --> no #### Annotation Values <!-- info: Purpose and values for each annotation --> <!-- scope: microscope --> - dialogue_id (string): dialogue ID - goal (string): natural language descriptions of the user goal - domains (list of strings): domains mentioned in current dialogue session - turn_id (int): turn ID - turn_domain (list of strings): domain mentioned in current turn - belief_state (dict): dialogue state, including: - inform slot-values (dict): the slots and corresponding values informed until current turn - turn_inform (dict): the slots and corresponding values informed in current turn - turn request (list of strings): the slots requested in current turn - user_actions (list of lists): user dialogue acts in current turn - system_actions (list of lists): system dialogue acts in current turn - db_results (list of strings): database search results - segmented_user_utterance (string): word segmentation result of user utterance - segmented_system_utterance (string): word segmentation result of system utterance #### Any Quality Control? <!-- info: Quality control measures? --> <!-- scope: telescope --> unknown ### Consent #### Any Consent Policy? <!-- info: Was there a consent policy involved when gathering the data? --> <!-- scope: telescope --> yes #### Consent Policy Details <!-- info: What was the consent policy? --> <!-- scope: microscope --> Annotators agree to the use of the dataset for research purposes. #### Other Consented Downstream Use <!-- info: What other downstream uses of the data did the original data creators and the data curators consent to? --> <!-- scope: microscope --> Any ### Private Identifying Information (PII) #### Contains PII? <!-- quick --> <!-- info: Does the source language data likely contain Personal Identifying Information about the data creators or subjects? --> <!-- scope: telescope --> no PII #### Justification for no PII <!-- info: Provide a justification for selecting `no PII` above. 
--> <!-- scope: periscope --> The slots and values as well as utterances do not contain any personal information. ### Maintenance #### Any Maintenance Plan? <!-- info: Does the original dataset have a maintenance plan? --> <!-- scope: telescope --> yes #### Maintenance Plan Details <!-- info: Describe the original dataset's maintenance plan. --> <!-- scope: microscope --> Building a leaderboard webpage to trace and display the latest results on the [dataset](https://terryqj0107.github.io/RiSAWOZ_webpage/) #### Maintainer Contact Information <!-- info: Provide contact information of a person responsible for the dataset maintenance --> <!-- scope: periscope --> Deyi Xiong (dyxiong@tju.edu.cn) #### Any Contestation Mechanism? <!-- info: Does the maintenance plan include a contestation mechanism allowing individuals to request removal fo content? --> <!-- scope: periscope --> contact maintainer #### Contestation Form Link <!-- info: Provide the form link or contact information --> <!-- scope: periscope --> Deyi Xiong (dyxiong@tju.edu.cn) ## Broader Social Context ### Previous Work on the Social Impact of the Dataset #### Usage of Models based on the Data <!-- info: Are you aware of cases where models trained on the task featured in this dataset ore related tasks have been used in automated systems? --> <!-- scope: telescope --> no ### Impact on Under-Served Communities #### Addresses needs of underserved Communities? <!-- info: Does this dataset address the needs of communities that are traditionally underserved in language technology, and particularly language generation technology? Communities may be underserved for exemple because their language, language variety, or social or geographical context is underepresented in NLP and NLG resources (datasets and models). --> <!-- scope: telescope --> yes #### Details on how Dataset Addresses the Needs <!-- info: Describe how this dataset addresses the needs of underserved communities. 
--> <!-- scope: microscope --> RiSAWOZ is the first large-scale multi-domain Chinese Wizard-of-Oz dataset with rich semantic annotations. ### Discussion of Biases #### Any Documented Social Biases? <!-- info: Are there documented social biases in the dataset? Biases in this context are variations in the ways members of different social categories are represented that can have harmful downstream consequences for members of the more disadvantaged group. --> <!-- scope: telescope --> no #### Are the Language Producers Representative of the Language? <!-- info: Does the distribution of language producers in the dataset accurately represent the full distribution of speakers of the language world-wide? If not, how does it differ? --> <!-- scope: periscope --> yes ## Considerations for Using the Data ### PII Risks and Liability #### Potential PII Risk <!-- info: Considering your answers to the PII part of the Data Curation Section, describe any potential privacy to the data subjects and creators risks when using the dataset. --> <!-- scope: microscope --> None ### Licenses #### Copyright Restrictions on the Dataset <!-- info: Based on your answers in the Intended Use part of the Data Overview Section, which of the following best describe the copyright and licensing status of the dataset? --> <!-- scope: periscope --> `open license - commercial use allowed` #### Copyright Restrictions on the Language Data <!-- info: Based on your answers in the Language part of the Data Curation Section, which of the following best describe the copyright and licensing status of the underlying language data? --> <!-- scope: periscope --> `open license - commercial use allowed` ### Known Technical Limitations #### Technical Limitations <!-- info: Describe any known technical limitations, such as spurrious correlations, train/test overlap, annotation biases, or mis-annotations, and cite the works that first identified these limitations when possible. 
--> <!-- scope: microscope --> None #### Unsuited Applications <!-- info: When using a model trained on this dataset in a setting where users or the public may interact with its predictions, what are some pitfalls to look out for? In particular, describe some applications of the general task featured in this dataset that its curation or properties make it less suitable for. --> <!-- scope: microscope --> Using the trained model on domains that are not included in the 12 domains selected for this dataset. #### Discouraged Use Cases <!-- info: What are some discouraged use cases of a model trained to maximize the proposed metrics on this dataset? In particular, think about settings where decisions made by a model that performs reasonably well on the metric my still have strong negative consequences for user or members of the public. --> <!-- scope: microscope --> Designing models that leverage unknown bias in the dataset to optimize specific metrics.
65,744
[ [ -0.032928466796875, -0.06304931640625, 0.0273895263671875, -0.00031280517578125, -0.010986328125, -0.0146942138671875, -0.02923583984375, -0.020172119140625, 0.023162841796875, 0.0521240234375, -0.0611572265625, -0.06732177734375, -0.0272216796875, -0.000926...
GEM/RotoWire_English-German
2022-10-24T15:30:03.000Z
[ "task_categories:table-to-text", "annotations_creators:automatically-created", "language_creators:unknown", "multilinguality:unknown", "size_categories:unknown", "source_datasets:original", "language:en", "language:de", "license:cc-by-4.0", "data-to-text", "region:us" ]
GEM
Dataset for the WNGT 2019 DGT shared task on "Document-Level Generation and Translation”.
@article{hayashi2019findings, title={Findings of the Third Workshop on Neural Generation and Translation}, author={Hayashi, Hiroaki and Oda, Yusuke and Birch, Alexandra and Konstas, Ioannis and Finch, Andrew and Luong, Minh-Thang and Neubig, Graham and Sudoh, Katsuhito}, journal={EMNLP-IJCNLP 2019}, pages={1}, year={2019} }
1
84
2022-03-02T23:29:22
--- annotations_creators: - automatically-created language_creators: - unknown language: - en - de license: - cc-by-4.0 multilinguality: - unknown size_categories: - unknown source_datasets: - original task_categories: - table-to-text task_ids: [] pretty_name: RotoWire_English-German tags: - data-to-text --- # Dataset Card for GEM/RotoWire_English-German ## Dataset Description - **Homepage:** https://sites.google.com/view/wngt19/dgt-task - **Repository:** https://github.com/neulab/dgt - **Paper:** https://www.aclweb.org/anthology/D19-5601/ - **Leaderboard:** N/A - **Point of Contact:** Hiroaki Hayashi ### Link to Main Data Card You can find the main data card on the [GEM Website](https://gem-benchmark.com/data_cards/RotoWire_English-German). ### Dataset Summary This dataset is a data-to-text dataset in the basketball domain. The input are tables in a fixed format with statistics about a game (in English) and the target is a German translation of the originally English description. The translations were done by professional translators with basketball experience. The dataset can be used to evaluate the cross-lingual data-to-text capabilities of a model with complex inputs. You can load the dataset via: ``` import datasets data = datasets.load_dataset('GEM/RotoWire_English-German') ``` The data loader can be found [here](https://huggingface.co/datasets/GEM/RotoWire_English-German). #### website [Website](https://sites.google.com/view/wngt19/dgt-task) #### paper [ACL Anthology](https://www.aclweb.org/anthology/D19-5601/) #### authors Graham Neubig (Carnegie Mellon University), Hiroaki Hayashi (Carnegie Mellon University) ## Dataset Overview ### Where to find the Data and its Documentation #### Webpage <!-- info: What is the webpage for the dataset (if it exists)? --> <!-- scope: telescope --> [Website](https://sites.google.com/view/wngt19/dgt-task) #### Download <!-- info: What is the link to where the original dataset is hosted? 
--> <!-- scope: telescope --> [Github](https://github.com/neulab/dgt) #### Paper <!-- info: What is the link to the paper describing the dataset (open access preferred)? --> <!-- scope: telescope --> [ACL Anthology](https://www.aclweb.org/anthology/D19-5601/) #### BibTex <!-- info: Provide the BibTex-formatted reference for the dataset. Please use the correct published version (ACL anthology, etc.) instead of google scholar created Bibtex. --> <!-- scope: microscope --> ``` @inproceedings{hayashi-etal-2019-findings, title = "Findings of the Third Workshop on Neural Generation and Translation", author = "Hayashi, Hiroaki and Oda, Yusuke and Birch, Alexandra and Konstas, Ioannis and Finch, Andrew and Luong, Minh-Thang and Neubig, Graham and Sudoh, Katsuhito", booktitle = "Proceedings of the 3rd Workshop on Neural Generation and Translation", month = nov, year = "2019", address = "Hong Kong", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D19-5601", doi = "10.18653/v1/D19-5601", pages = "1--14", abstract = "This document describes the findings of the Third Workshop on Neural Generation and Translation, held in concert with the annual conference of the Empirical Methods in Natural Language Processing (EMNLP 2019). First, we summarize the research trends of papers presented in the proceedings. Second, we describe the results of the two shared tasks 1) efficient neural machine translation (NMT) where participants were tasked with creating NMT systems that are both accurate and efficient, and 2) document generation and translation (DGT) where participants were tasked with developing systems that generate summaries from structured data, potentially with assistance from text in another language.", } ``` #### Contact Name <!-- quick --> <!-- info: If known, provide the name of at least one person the reader can contact for questions about the dataset. 
--> <!-- scope: periscope --> Hiroaki Hayashi #### Contact Email <!-- info: If known, provide the email of at least one person the reader can contact for questions about the dataset. --> <!-- scope: periscope --> hiroakih@andrew.cmu.edu #### Has a Leaderboard? <!-- info: Does the dataset have an active leaderboard? --> <!-- scope: telescope --> no ### Languages and Intended Use #### Multilingual? <!-- quick --> <!-- info: Is the dataset multilingual? --> <!-- scope: telescope --> yes #### Covered Languages <!-- quick --> <!-- info: What languages/dialects are covered in the dataset? --> <!-- scope: telescope --> `English`, `German` #### License <!-- quick --> <!-- info: What is the license of the dataset? --> <!-- scope: telescope --> cc-by-4.0: Creative Commons Attribution 4.0 International #### Intended Use <!-- info: What is the intended use of the dataset? --> <!-- scope: microscope --> Foster the research on document-level generation technology and contrast the methods for different types of inputs. #### Primary Task <!-- info: What primary task does the dataset support? --> <!-- scope: telescope --> Data-to-Text #### Communicative Goal <!-- quick --> <!-- info: Provide a short description of the communicative goal of a model trained for this task on this dataset. --> <!-- scope: periscope --> Describe a basketball game given its box score table (and possibly a summary in a foreign language). ### Credit #### Curation Organization Type(s) <!-- info: In what kind of organization did the dataset curation happen? --> <!-- scope: telescope --> `academic` #### Curation Organization(s) <!-- info: Name the organization(s). --> <!-- scope: periscope --> Carnegie Mellon University #### Dataset Creators <!-- info: Who created the original dataset? List the people involved in collecting the dataset and their affiliation(s). 
--> <!-- scope: microscope --> Graham Neubig (Carnegie Mellon University), Hiroaki Hayashi (Carnegie Mellon University) #### Funding <!-- info: Who funded the data creation? --> <!-- scope: microscope --> Graham Neubig #### Who added the Dataset to GEM? <!-- info: Who contributed to the data card and adding the dataset to GEM? List the people+affiliations involved in creating this data card and who helped integrate this dataset into GEM. --> <!-- scope: microscope --> Hiroaki Hayashi (Carnegie Mellon University) ### Dataset Structure #### Data Fields <!-- info: List and describe the fields present in the dataset. --> <!-- scope: telescope --> - `id` (`string`): The identifier from the original dataset. - `gem_id` (`string`): The identifier from GEMv2. - `day` (`string`): Date of the game (Format: `MM_DD_YY`) - `home_name` (`string`): Home team name. - `home_city` (`string`): Home team city name. - `vis_name` (`string`): Visiting (Away) team name. - `vis_city` (`string`): Visiting team (Away) city name. - `home_line` (`Dict[str, str]`): Home team statistics (e.g., team free throw percentage). - `vis_line` (`Dict[str, str]`): Visiting team statistics (e.g., team free throw percentage). - `box_score` (`Dict[str, Dict[str, str]]`): Box score table. (Stat_name to [player ID to stat_value].) - `summary_en` (`List[string]`): Tokenized target summary in English. - `sentence_end_index_en` (`List[int]`): Sentence end indices for `summary_en`. - `summary_de` (`List[string]`): Tokenized target summary in German. - `sentence_end_index_de` (`List[int]`): ): Sentence end indices for `summary_de`. - (Unused) `detok_summary_org` (`string`): Original summary provided by RotoWire dataset. - (Unused) `summary` (`List[string]`): Tokenized summary of `detok_summary_org`. - (Unused) `detok_summary` (`string`): Detokenized (with organizer's detokenizer) summary of `summary`. #### Reason for Structure <!-- info: How was the dataset structure determined? 
--> <!-- scope: microscope --> - Structured data are directly imported from the original RotoWire dataset. - Textual data (English, German) are associated to each sample. #### Example Instance <!-- info: Provide a JSON formatted example of a typical instance in the dataset. --> <!-- scope: periscope --> ``` { 'id': '11_02_16-Jazz-Mavericks-TheUtahJazzdefeatedthe', 'gem_id': 'GEM-RotoWire_English-German-train-0' 'day': '11_02_16', 'home_city': 'Utah', 'home_name': 'Jazz', 'vis_city': 'Dallas', 'vis_name': 'Mavericks', 'home_line': { 'TEAM-FT_PCT': '58', ... }, 'vis_line': { 'TEAM-FT_PCT': '80', ... }, 'box_score': { 'PLAYER_NAME': { '0': 'Harrison Barnes', ... }, ... 'summary_en': ['The', 'Utah', 'Jazz', 'defeated', 'the', 'Dallas', 'Mavericks', ...], 'sentence_end_index_en': [16, 52, 100, 137, 177, 215, 241, 256, 288], 'summary_de': ['Die', 'Utah', 'Jazz', 'besiegten', 'am', 'Mittwoch', 'in', 'der', ...], 'sentence_end_index_de': [19, 57, 107, 134, 170, 203, 229, 239, 266], 'detok_summary_org': "The Utah Jazz defeated the Dallas Mavericks 97 - 81 ...", 'detok_summary': "The Utah Jazz defeated the Dallas Mavericks 97-81 ...", 'summary': ['The', 'Utah', 'Jazz', 'defeated', 'the', 'Dallas', 'Mavericks', ...], } ``` #### Data Splits <!-- info: Describe and name the splits in the dataset if there are more than one. --> <!-- scope: periscope --> - Train - Validation - Test #### Splitting Criteria <!-- info: Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g., if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here. --> <!-- scope: microscope --> - English summaries are provided sentence-by-sentence to professional German translators with basketball knowledge to obtain sentence-level German translations. - Split criteria follows the original RotoWire dataset. 
#### <!-- info: What does an outlier of the dataset in terms of length/perplexity/embedding look like? --> <!-- scope: microscope --> - The (English) summary length in the training set varies from 145 to 650 words, with an average of 323 words. ## Dataset in GEM ### Rationale for Inclusion in GEM #### Why is the Dataset in GEM? <!-- info: What does this dataset contribute toward better generation evaluation and why is it part of GEM? --> <!-- scope: microscope --> The use of two modalities (data, foreign text) to generate a document-level text summary. #### Similar Datasets <!-- info: Do other datasets for the high level task exist? --> <!-- scope: telescope --> yes #### Unique Language Coverage <!-- info: Does this dataset cover other languages than other datasets for the same task? --> <!-- scope: periscope --> yes #### Difference from other GEM datasets <!-- info: What else sets this dataset apart from other similar datasets in GEM? --> <!-- scope: microscope --> The potential use of two modalities (data, foreign text) as input. #### Ability that the Dataset measures <!-- info: What aspect of model ability can be measured with this dataset? --> <!-- scope: periscope --> - Translation - Data-to-text verbalization - Aggregation of the two above. ### GEM-Specific Curation #### Modificatied for GEM? <!-- info: Has the GEM version of the dataset been modified in any way (data, processing, splits) from the original curated data? --> <!-- scope: telescope --> yes #### GEM Modifications <!-- info: What changes have been made to he original dataset? --> <!-- scope: periscope --> `other` #### Modification Details <!-- info: For each of these changes, described them in more details and provided the intended purpose of the modification --> <!-- scope: microscope --> - Added GEM ID in each sample. - Normalize the number of players in each sample with "N/A" for consistent data loading. #### Additional Splits? <!-- info: Does GEM provide additional splits to the dataset? 
--> <!-- scope: telescope --> no ### Getting Started with the Task #### Pointers to Resources <!-- info: Getting started with in-depth research on the task. Add relevant pointers to resources that researchers can consult when they want to get started digging deeper into the task. --> <!-- scope: microscope --> - [Challenges in Data-to-Document Generation](https://aclanthology.org/D17-1239) - [Data-to-Text Generation with Content Selection and Planning](https://ojs.aaai.org//index.php/AAAI/article/view/4668) - [Findings of the Third Workshop on Neural Generation and Translation](https://aclanthology.org/D19-5601) #### Technical Terms <!-- info: Technical terms used in this card and the dataset and their definitions --> <!-- scope: microscope --> - Data-to-text - Neural machine translation (NMT) - Document-level generation and translation (DGT) ## Previous Results ### Previous Results #### Measured Model Abilities <!-- info: What aspect of model ability can be measured with this dataset? --> <!-- scope: telescope --> - Textual accuracy towards the gold-standard summary. - Content faithfulness to the input structured data. #### Metrics <!-- info: What metrics are typically used for this task? --> <!-- scope: periscope --> `BLEU`, `ROUGE`, `Other: Other Metrics` #### Other Metrics <!-- info: Definitions of other metrics --> <!-- scope: periscope --> Model-based measures proposed by (Wiseman et al., 2017): - Relation Generation - Content Selection - Content Ordering #### Proposed Evaluation <!-- info: List and describe the purpose of the metrics and evaluation methodology (including human evaluation) that the dataset creators used when introducing this task. --> <!-- scope: microscope --> To evaluate the fidelity of the generated content to the input data. #### Previous results available? <!-- info: Are previous results available? --> <!-- scope: telescope --> yes #### Other Evaluation Approaches <!-- info: What evaluation approaches have others used? 
--> <!-- scope: periscope --> N/A. #### Relevant Previous Results <!-- info: What are the most relevant previous results for this task/dataset? --> <!-- scope: microscope --> See Table 2 to 7 of (https://aclanthology.org/D19-5601) for previous results for this dataset. ## Dataset Curation ### Original Curation #### Original Curation Rationale <!-- info: Original curation rationale --> <!-- scope: telescope --> A random subset of RotoWire dataset was chosen for German translation annotation. #### Communicative Goal <!-- info: What was the communicative goal? --> <!-- scope: periscope --> Foster the research on document-level generation technology and contrast the methods for different types of inputs. #### Sourced from Different Sources <!-- info: Is the dataset aggregated from different data sources? --> <!-- scope: telescope --> yes #### Source Details <!-- info: List the sources (one per line) --> <!-- scope: periscope --> RotoWire ### Language Data #### How was Language Data Obtained? <!-- info: How was the language data obtained? --> <!-- scope: telescope --> `Created for the dataset` #### Creation Process <!-- info: If created for the dataset, describe the creation process. --> <!-- scope: microscope --> Professional German language translators were hired to translate basketball summaries from a subset of RotoWire dataset. #### Language Producers <!-- info: What further information do we have on the language producers? --> <!-- scope: microscope --> Translators are familiar with basketball terminology. #### Topics Covered <!-- info: Does the language in the dataset focus on specific topics? How would you describe them? --> <!-- scope: periscope --> Basketball (NBA) game summaries. #### Data Validation <!-- info: Was the text validated by a different worker or a data curator? --> <!-- scope: telescope --> validated by data curator #### Data Preprocessing <!-- info: How was the text data pre-processed? 
(Enter N/A if the text was not pre-processed) --> <!-- scope: microscope --> Sentence-level translations were aligned back to the original English summary sentences. #### Was Data Filtered? <!-- info: Were text instances selected or filtered? --> <!-- scope: telescope --> not filtered ### Structured Annotations #### Additional Annotations? <!-- quick --> <!-- info: Does the dataset have additional annotations for each instance? --> <!-- scope: telescope --> automatically created #### Annotation Service? <!-- info: Was an annotation service used? --> <!-- scope: telescope --> no #### Annotation Values <!-- info: Purpose and values for each annotation --> <!-- scope: microscope --> Sentence-end indices for the tokenized summaries. Sentence boundaries can help users accurately identify aligned sentences in both languages, as well as allowing an accurate evaluation that involves sentence boundaries (ROUGE-L). #### Any Quality Control? <!-- info: Quality control measures? --> <!-- scope: telescope --> validated through automated script #### Quality Control Details <!-- info: Describe the quality control measures that were taken. --> <!-- scope: microscope --> Token and number overlaps between pairs of aligned sentences are measured. ### Consent #### Any Consent Policy? <!-- info: Was there a consent policy involved when gathering the data? --> <!-- scope: telescope --> no #### Justification for Using the Data <!-- info: If not, what is the justification for reusing the data? --> <!-- scope: microscope --> Reusing by citing the original papers: - Sam Wiseman, Stuart M. Shieber, Alexander M. Rush: Challenges in Data-to-Document Generation. EMNLP 2017. - Hiroaki Hayashi, Yusuke Oda, Alexandra Birch, Ioannis Konstas, Andrew Finch, Minh-Thang Luong, Graham Neubig, Katsuhito Sudoh. Findings of the Third Workshop on Neural Generation and Translation. WNGT 2019. ### Private Identifying Information (PII) #### Contains PII? 
<!-- quick --> <!-- info: Does the source language data likely contain Personal Identifying Information about the data creators or subjects? --> <!-- scope: telescope --> unlikely #### Categories of PII <!-- info: What categories of PII are present or suspected in the data? --> <!-- scope: periscope --> `generic PII` #### Any PII Identification? <!-- info: Did the curators use any automatic/manual method to identify PII in the dataset? --> <!-- scope: periscope --> no identification ### Maintenance #### Any Maintenance Plan? <!-- info: Does the original dataset have a maintenance plan? --> <!-- scope: telescope --> no ## Broader Social Context ### Previous Work on the Social Impact of the Dataset #### Usage of Models based on the Data <!-- info: Are you aware of cases where models trained on the task featured in this dataset ore related tasks have been used in automated systems? --> <!-- scope: telescope --> no ### Impact on Under-Served Communities #### Addresses needs of underserved Communities? <!-- info: Does this dataset address the needs of communities that are traditionally underserved in language technology, and particularly language generation technology? Communities may be underserved for exemple because their language, language variety, or social or geographical context is underepresented in NLP and NLG resources (datasets and models). --> <!-- scope: telescope --> no ### Discussion of Biases #### Any Documented Social Biases? <!-- info: Are there documented social biases in the dataset? Biases in this context are variations in the ways members of different social categories are represented that can have harmful downstream consequences for members of the more disadvantaged group. --> <!-- scope: telescope --> no #### Are the Language Producers Representative of the Language? <!-- info: Does the distribution of language producers in the dataset accurately represent the full distribution of speakers of the language world-wide? If not, how does it differ? 
--> <!-- scope: periscope --> - English text in this dataset is from Rotowire, originally written by writers at Rotowire.com that are likely US-based. - German text is produced by professional translators proficient in both English and German. ## Considerations for Using the Data ### PII Risks and Liability #### Potential PII Risk <!-- info: Considering your answers to the PII part of the Data Curation Section, describe any potential privacy to the data subjects and creators risks when using the dataset. --> <!-- scope: microscope --> - Structured data contain real National Basketball Association player and organization names. ### Licenses #### Copyright Restrictions on the Dataset <!-- info: Based on your answers in the Intended Use part of the Data Overview Section, which of the following best describe the copyright and licensing status of the dataset? --> <!-- scope: periscope --> `open license - commercial use allowed` #### Copyright Restrictions on the Language Data <!-- info: Based on your answers in the Language part of the Data Curation Section, which of the following best describe the copyright and licensing status of the underlying language data? --> <!-- scope: periscope --> `open license - commercial use allowed` ### Known Technical Limitations #### Technical Limitations <!-- info: Describe any known technical limitations, such as spurrious correlations, train/test overlap, annotation biases, or mis-annotations, and cite the works that first identified these limitations when possible. --> <!-- scope: microscope --> Potential overlap of box score tables between splits. This was extensively studied and pointed out by [1]. [1]: Thomson, Craig, Ehud Reiter, and Somayajulu Sripada. "SportSett: Basketball-A robust and maintainable data-set for Natural Language Generation." Proceedings of the Workshop on Intelligent Information Processing and Natural Language Generation. 2020. 
#### Unsuited Applications <!-- info: When using a model trained on this dataset in a setting where users or the public may interact with its predictions, what are some pitfalls to look out for? In particular, describe some applications of the general task featured in this dataset that its curation or properties make it less suitable for. --> <!-- scope: microscope --> Users may interact with a trained model to learn about a NBA game in a textual manner. On generated texts, they may observe factual errors that contradicts the actual data that the model conditions on. Factual errors include wrong statistics of a player (e.g., 3PT), non-existent injury information. #### Discouraged Use Cases <!-- info: What are some discouraged use cases of a model trained to maximize the proposed metrics on this dataset? In particular, think about settings where decisions made by a model that performs reasonably well on the metric my still have strong negative consequences for user or members of the public. --> <!-- scope: microscope --> Publishing the generated text as is. Even if the model achieves high scores on the evaluation metrics, there is a risk of factual errors mentioned above.
22,983
[ [ -0.0198974609375, -0.05950927734375, 0.0224151611328125, 0.0011882781982421875, -0.0082855224609375, 0.0022449493408203125, -0.0242919921875, -0.022979736328125, 0.0231475830078125, 0.01151275634765625, -0.054107666015625, -0.06304931640625, -0.039031982421875, ...
GEM/references
2022-06-23T19:32:57.000Z
[ "region:us" ]
GEM
null
null
0
84
2022-03-02T23:29:22
# GEM References ## What is it? This repository contains all the reference datasets that are used for running evaluation on the GEM benchmark. Some of these datasets were originally hosted as a [GitHub release](https://github.com/GEM-benchmark/GEM-metrics/releases) on the [`GEM-metrics`](https://github.com/GEM-benchmark/GEM-metrics) repository, but have been migrated to the Hugging Face Hub. ## Converting datasets to JSON We provide a `convert_dataset_to_json.py` conversion script that converts the datasets in the GEM organisation to the JSON format expected by the `GEM-metrics` library. To run the script, first install [`jq`](https://stedolan.github.io/jq/download/) and then install the script's Python dependencies: ``` python -m pip install -r requirements.txt ``` You can then run the script as follows: ```python python generate_evaluation_datasets.py ``` This script will: * Download and convert the datasets under the GEM organisation to JSON format * Validate that the each dataset has the expected columns of `gem_id`, `target`, and `references`
1,074
[ [ -0.02886962890625, -0.025115966796875, 0.0284576416015625, 0.0068206787109375, -0.01332855224609375, 0.007419586181640625, -0.004241943359375, -0.000774383544921875, 0.00801849365234375, 0.04901123046875, -0.058624267578125, -0.02532958984375, -0.016448974609375...
GEM-submissions/v1-outputs-and-scores
2022-02-25T11:48:06.000Z
[ "region:us" ]
GEM-submissions
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
GEM-submissions/lewtun__hugging-face-test-t5-base.outputs.json-36bf2a59__1646052073
2022-02-28T12:41:18.000Z
[ "benchmark:gem", "evaluation", "benchmark", "region:us" ]
GEM-submissions
null
null
0
84
2022-03-02T23:29:22
--- benchmark: gem type: prediction submission_name: Hugging Face test T5-base.outputs.json 36bf2a59 tags: - evaluation - benchmark --- # GEM Submission Submission name: Hugging Face test T5-base.outputs.json 36bf2a59
220
[ [ -0.0247955322265625, -0.037322998046875, 0.027679443359375, 0.036712646484375, -0.01427459716796875, 0.0284576416015625, 0.024627685546875, -0.001972198486328125, 0.040985107421875, 0.0251617431640625, -0.07342529296875, -0.046783447265625, -0.0531005859375, ...
GEM-submissions/lewtun__this-is-a-test__1646052811
2022-02-28T12:53:35.000Z
[ "benchmark:gem", "evaluation", "benchmark", "region:us" ]
GEM-submissions
null
null
0
84
2022-03-02T23:29:22
--- benchmark: gem type: prediction submission_name: This is a test tags: - evaluation - benchmark --- # GEM Submission Submission name: This is a test
154
[ [ -0.0010280609130859375, -0.063232421875, 0.038299560546875, 0.00849151611328125, -0.0184173583984375, 0.02978515625, 0.0123443603515625, 0.0229644775390625, 0.03131103515625, 0.0272674560546875, -0.07513427734375, -0.00852203369140625, -0.032379150390625, 0....
GEM-submissions/lewtun__this-is-a-test__1646230987
2022-03-02T14:23:10.000Z
[ "benchmark:gem", "evaluation", "benchmark", "region:us" ]
GEM-submissions
null
null
0
84
2022-03-02T23:29:22
--- benchmark: gem type: prediction submission_name: This is a test tags: - evaluation - benchmark --- # GEM Submission Submission name: This is a test
154
[ [ -0.0010280609130859375, -0.063232421875, 0.038299560546875, 0.00849151611328125, -0.0184173583984375, 0.02978515625, 0.0123443603515625, 0.0229644775390625, 0.03131103515625, 0.0272674560546875, -0.07513427734375, -0.00852203369140625, -0.032379150390625, 0....
GV05/shlomit_speech
2022-02-03T18:44:14.000Z
[ "region:us" ]
GV05
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
Graphcore/vqa-lxmert
2022-10-25T08:59:34.000Z
[ "language:en", "license:cc-by-4.0", "region:us" ]
Graphcore
VQA is a new dataset containing open-ended questions about images. These questions require an understanding of vision, language and commonsense knowledge to answer.
@inproceedings{antol2015vqa, title={Vqa: Visual question answering}, author={Antol, Stanislaw and Agrawal, Aishwarya and Lu, Jiasen and Mitchell, Margaret and Batra, Dhruv and Zitnick, C Lawrence and Parikh, Devi}, booktitle={Proceedings of the IEEE international conference on computer vision}, pages={2425--2433}, year={2015} }
0
84
2022-03-02T23:29:22
--- language: - en license: - cc-by-4.0 ---
45
[ [ -0.009002685546875, -0.01300048828125, 0.045654296875, 0.03460693359375, -0.0135040283203125, 0.0165252685546875, 0.025238037109375, 0.003536224365234375, 0.04052734375, 0.051788330078125, -0.0455322265625, -0.016632080078125, -0.049652099609375, -0.00334548...
GroNLP/ik-nlp-22_pestyle
2022-10-25T09:06:27.000Z
[ "task_categories:translation", "annotations_creators:machine-generated", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:translation", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "language:it", "license:other", "region:us" ]
GroNLP
This dataset contains a sample of sentences taken from the FLORES-101 dataset that were either translated from scratch or post-edited from an existing automatic translation by three human translators. Translation were performed for the English-Italian language pair, and translators' behavioral data (keystrokes, pauses, editing times) were collected using the PET platform.
No citation information available.
0
84
2022-03-02T23:29:22
--- annotations_creators: - machine-generated - expert-generated language_creators: - found language: - en - it license: - other multilinguality: - translation size_categories: - 1K<n<10K source_datasets: - original task_categories: - translation pretty_name: iknlp22-pestyle --- # Dataset Card for IK-NLP-22 Project 1: A Study in Post-Editing Stylometry ## Table of Contents - [Dataset Card for IK-NLP-22 Project 1: A Study in Post-Editing Stylometry](#dataset-card-for-ik-nlp-22-project-1-a-study-in-post-editing-stylometry) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Train Split](#train-split) - [Test splits](#test-splits) - [Dataset Creation](#dataset-creation) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Source:** [FLORES-101](https://huggingface.co/datasets/gsarti/flores_101) - **Point of Contact:** [Gabriele Sarti](mailto:ik-nlp-course@rug.nl) ### Dataset Summary This dataset contains a sample of sentences taken from the [FLORES-101](https://huggingface.co/datasets/gsarti/flores_101) dataset that were either translated from scratch or post-edited from an existing automatic translation by three human translators. Translation were performed for the English-Italian language pair, and translators' behavioral data (keystrokes, pauses, editing times) were collected using the [PET](https://github.com/wilkeraziz/PET) platform. 
This dataset is made available for final projects of the 2022 edition of the Natural Language Processing course at the [Information Science Master's Degree](https://www.rug.nl/masters/information-science/?lang=en) at the University of Groningen, taught by [Arianna Bisazza](https://research.rug.nl/en/persons/arianna-bisazza) and [Gabriele Sarti](https://research.rug.nl/en/persons/gabriele-sarti) with the assistance of [Anjali Nair](https://nl.linkedin.com/in/anjalinair012). **Disclaimer**: *This repository is provided without direct data access due to currently unpublished results.* _**For this reason, it is strictly forbidden to share or publish all the data associated to this repository**_. *Students will be provided with a compressed folder containing the data upon choosing a project based on this dataset. To load the dataset using 🤗 Datasets, download and unzip the provided folder and pass it to the* `load_dataset` *method as:* `datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', 'full', data_dir='path/to/unzipped/folder')` ### Languages The language data of is in English (BCP-47 `en`) and Italian (BCP-47 `it`) ## Dataset Structure ### Data Instances The dataset contains four configurations: `full`, `test_mask_subject`, `test_mask_modality`, `test_mask_time`. `full` contains the main `train` split in which all fields are available. The other three, `test_mask_subject`, `test_mask_modality`, `test_mask_time`, contain a `test` split each with different fields removed to avoid information leaking during evaluation. See more details in the [Data Splits](#data-splits) section. ### Data Fields The following fields are contained in the training set: |Field|Description| |-----|-----------| |`item_id` | The sentence identifier. The first digits of the number represent the document containing the sentence, while the last digit of the number represents the sentence position inside the document. Documents can contain from 3 to 5 semantically-related sentences each. 
| |`subject_id` | The identifier for the translator performing the translation from scratch or post-editing task. Values: `t1`, `t2` or `t3`. | |`modality` | The modality of the translation task. Values: `ht` (translation from scratch), `pe1` (post-editing Google Translate translations), `pe2` (post-editing [mBART](https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt) translations). | |`src_text` | The original source sentence extracted from Wikinews, wikibooks or wikivoyage. | |`mt_text` | Missing if tasktype is `ht`. Otherwise, contains the automatically-translated sentence before post-editing. | |`tgt_text` | Final sentence produced by the translator (either via translation from scratch of `sl_text` or post-editing `mt_text`) | |`edit_time` | Total editing time for the translation in seconds. | |`k_total` | Total number of keystrokes for the translation. | |`k_letter` | Total number of letter keystrokes for the translation. | |`k_digit` | Total number of digit keystrokes for the translation. | |`k_white` | Total number of whitespace keystrokes for the translation. | |`k_symbol` | Total number of symbol (punctuation, etc.) keystrokes for the translation. | |`k_nav` | Total number of navigation keystrokes (left-right arrows, mouse clicks) for the translation. | |`k_erase` | Total number of erase keystrokes (backspace, cancel) for the translation. | |`k_copy` | Total number of copy (Ctrl + C) actions during the translation. | |`k_cut` | Total number of cut (Ctrl + X) actions during the translation. | |`k_paste` | Total number of paste (Ctrl + V) actions during the translation. | |`n_pause_geq_300` | Number of pauses of 300ms or more during the translation. | |`len_pause_geq_300` | Total duration of pauses of 300ms or more, in milliseconds. | |`n_pause_geq_1000` | Number of pauses of 1s or more during the translation. | |`len_pause_geq_1000` | Total duration of pauses of 1000ms or more, in milliseconds. 
| |`num_annotations` | Number of times the translator focused the texbox for performing the translation of the sentence during the translation session. E.g. 1 means the translation was performed once and never revised. | |`n_insert` | Number of post-editing insertions (empty for modality `ht`) computed using the [tercom](https://github.com/jhclark/tercom) library. | |`n_delete` | Number of post-editing deletions (empty for modality `ht`) computed using the [tercom](https://github.com/jhclark/tercom) library. | |`n_substitute` | Number of post-editing substitutions (empty for modality `ht`) computed using the [tercom](https://github.com/jhclark/tercom) library. | |`n_shift` | Number of post-editing shifts (empty for modality `ht`) computed using the [tercom](https://github.com/jhclark/tercom) library. | |`bleu` | Sentence-level BLEU score between MT and post-edited fields (empty for modality `ht`) computed using the [SacreBLEU](https://github.com/mjpost/sacrebleu) library with default parameters. | |`chrf` | Sentence-level chrF score between MT and post-edited fields (empty for modality `ht`) computed using the [SacreBLEU](https://github.com/mjpost/sacrebleu) library with default parameters. | |`ter` | Sentence-level TER score between MT and post-edited fields (empty for modality `ht`) computed using the [tercom](https://github.com/jhclark/tercom) library. | |`aligned_edit` | Aligned visual representation of REF (`mt_text`), HYP (`tl_text`) and edit operations (I = Insertion, D = Deletion, S = Substitution) performed on the field. Replace `\\n` with `\n` to show the three aligned rows.| ### Data Splits | config| train| test| |------:|-----:|----:| |`main` | 1170 | 120 | #### Train Split The `train` split contains a total of 1170 triplets (or pairs, when translation from scratch is performed) annotated with behavioral data produced during the translation. 
The following is an example of the subject `t3` post-editing a machine translation produced by system 2 (tasktype `pe2`) taken from the `train` split. The field `aligned_edit` is showed over three lines to provide a visual understanding of its contents. ```json { "item_id": 1072, "subject_id": "t3", "tasktype": "pe2", "src_text": "At the beginning dress was heavily influenced by the Byzantine culture in the east.", "mt_text": "All'inizio il vestito era fortemente influenzato dalla cultura bizantina dell'est.", "tgt+text": "Inizialmente, l'abbigliamento era fortemente influenzato dalla cultura bizantina orientale.", "edit_time": 45.687, "k_total": 51, "k_letter": 31, "k_digit": 0, "k_white": 2, "k_symbol": 3, "k_nav": 7, "k_erase": 3, "k_copy": 0, "k_cut": 0, "k_paste": 0, "n_pause_geq_300": 9, "len_pause_geq_300": 40032, "n_pause_geq_1000": 5, "len_pause_geq_1000": 38392, "num_annotations": 1, "n_insert": 0.0, "n_delete": 1.0, "n_substitute": 3.0, "n_shift": 0.0, "bleu": 47.99, "chrf": 62.05, "ter": 40.0, "aligned_edit: "REF: all'inizio il vestito era fortemente influenzato dalla cultura bizantina dell'est.\\n HYP: ********** inizialmente, l'abbigliamento era fortemente influenzato dalla cultura bizantina orientale.\\n EVAL: D S S S" } ``` The text is provided as-is, without further preprocessing or tokenization. #### Test splits The three `test` splits (one per configuration) contain the same 120 entries each, following the same structure as `train`. Each test split omit some of the fields to prevent leakage of information: - In `test_mask_subject` the `subject_id` is absent, for the main task of post-editor stylometry. - In `test_mask_modality` the following fields are absent for the modality prediction extra task: `modality`, `mt_text`, `n_insert`, `n_delete`, `n_substitute`, `n_shift`, `ter`, `bleu`, `chrf`, `aligned_edit`. 
- In `test_mask_time` the following fields are absent for the time and pause prediction extra task: `edit_time`, `n_pause_geq_300`, `len_pause_geq_300`, `n_pause_geq_1000`, and `len_pause_geq_1000`. ### Dataset Creation The dataset was parsed from PET XML files into CSV format using a script adapted from the one by [Antonio Toral](https://research.rug.nl/en/persons/antonio-toral-ruiz) found at the following link: [https://github.com/antot/postediting_novel_frontiers](https://github.com/antot/postediting_novel_frontiers) ## Additional Information ### Dataset Curators For problems related to this 🤗 Datasets version, please contact us at [ik-nlp-course@rug.nl](mailto:ik-nlp-course@rug.nl). ### Licensing Information It is forbidden to share or publish the data associated with this 🤗 Dataset version. ### Citation Information No citation information is provided for this dataset.
10,748
[ [ -0.0301513671875, -0.056488037109375, 0.02703857421875, 0.0261383056640625, -0.0313720703125, -0.0108184814453125, -0.037750244140625, -0.028961181640625, 0.0255126953125, 0.03515625, -0.052947998046875, -0.07037353515625, -0.03564453125, 0.032379150390625, ...
HarrisDePerceptron/sv_corpora_parliament_processed
2022-02-06T22:08:17.000Z
[ "region:us" ]
HarrisDePerceptron
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
HarrisDePerceptron/ur_corpora_pib
2022-02-08T10:30:32.000Z
[ "region:us" ]
HarrisDePerceptron
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
Jeska/autonlp-data-vaccinfaq
2021-10-19T12:06:57.000Z
[ "task_categories:text-classification", "region:us" ]
Jeska
null
null
0
84
2022-03-02T23:29:22
--- task_categories: - text-classification --- # AutoNLP Dataset for project: vaccinfaq ## Table of content - [Dataset Description](#dataset-description) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) ## Dataset Descritpion This dataset has been automatically processed by AutoNLP for project vaccinfaq. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "target": 6, "text": "What je naam?" }, { "target": 6, "text": "Hoe heet je?" } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "target": "ClassLabel(num_classes=181, names=['chitchat_ask_bye', 'chitchat_ask_hi', 'chitchat_ask_hi_de', 'chitchat_ask_hi_en', 'chitchat_ask_hi_fr', 'chitchat_ask_hoe_gaat_het', 'chitchat_ask_name', 'chitchat_ask_thanks', 'faq_ask_aantal_gevaccineerd', 'faq_ask_aantal_gevaccineerd_wereldwijd', 'faq_ask_afspraak_afzeggen', 'faq_ask_afspraak_gemist', 'faq_ask_algemeen_info', 'faq_ask_allergisch_na_vaccinatie', 'faq_ask_alternatieve_medicatie', 'faq_ask_andere_vaccins', 'faq_ask_astrazeneca', 'faq_ask_astrazeneca_bij_ouderen', 'faq_ask_astrazeneca_bloedklonters', 'faq_ask_astrazeneca_prik_2', 'faq_ask_attest', 'faq_ask_autisme_na_vaccinatie', 'faq_ask_auto-immuun', 'faq_ask_begeleiding', 'faq_ask_beschermen', 'faq_ask_beschermingsduur', 'faq_ask_beschermingspercentage', 'faq_ask_besmetten_na_vaccin', 'faq_ask_betalen_voor_vaccin', 'faq_ask_betrouwbaar', 'faq_ask_betrouwbare_bronnen', 'faq_ask_bijsluiter', 'faq_ask_bijwerking_AZ', 'faq_ask_bijwerking_JJ', 'faq_ask_bijwerking_algemeen', 'faq_ask_bijwerking_lange_termijn', 'faq_ask_bijwerking_moderna', 'faq_ask_bijwerking_pfizer', 'faq_ask_bloed_geven', 'faq_ask_borstvoeding', 'faq_ask_buitenlander', 'faq_ask_chronisch_ziek', 'faq_ask_combi', 'faq_ask_complottheorie', 
'faq_ask_complottheorie_5G', 'faq_ask_complottheorie_Bill_Gates', 'faq_ask_contra_ind', 'faq_ask_corona_is_griep', 'faq_ask_corona_vermijden', 'faq_ask_covid_door_vaccin', 'faq_ask_curevac', 'faq_ask_derde_prik', 'faq_ask_dna', 'faq_ask_duur_vaccinatie', 'faq_ask_eerst_weigeren', 'faq_ask_eerste_prik_buitenland', 'faq_ask_essentieel_beroep', 'faq_ask_experimenteel', 'faq_ask_foetus', 'faq_ask_geen_antwoord', 'faq_ask_geen_risicopatient', 'faq_ask_geen_uitnodiging', 'faq_ask_gestockeerd', 'faq_ask_gezondheidstoestand_gekend', 'faq_ask_gif_in_vaccin', 'faq_ask_goedkeuring', 'faq_ask_groepsimmuniteit', 'faq_ask_hartspierontsteking', 'faq_ask_hersenziekte', 'faq_ask_hoe_dodelijk', 'faq_ask_hoe_weet_overheid', 'faq_ask_hoeveel_dosissen', 'faq_ask_huisarts', 'faq_ask_huisdieren', 'faq_ask_iedereen', 'faq_ask_in_vaccin', 'faq_ask_info_vaccins', 'faq_ask_janssen', 'faq_ask_janssen_een_dosis', 'faq_ask_jong_en_gezond', 'faq_ask_keuze', 'faq_ask_keuze_vaccinatiecentrum', 'faq_ask_kinderen', 'faq_ask_kosjer_halal', 'faq_ask_leveringen', 'faq_ask_logistiek', 'faq_ask_logistiek_veilig', 'faq_ask_magnetisch', 'faq_ask_man_vrouw_verschillen', 'faq_ask_mantelzorger', 'faq_ask_maximaal_een_dosis', 'faq_ask_meer_bijwerkingen_tweede_dosis', 'faq_ask_minder_mobiel', 'faq_ask_moderna', 'faq_ask_mondmasker', 'faq_ask_motiveren', 'faq_ask_mrna_vs_andere_vaccins', 'faq_ask_naaldangst', 'faq_ask_nadelen', 'faq_ask_nuchter', 'faq_ask_ontwikkeling', 'faq_ask_onvruchtbaar', 'faq_ask_oplopen_vaccinatie', 'faq_ask_pfizer', 'faq_ask_phishing', 'faq_ask_pijnstiller', 'faq_ask_planning_eerstelijnszorg', 'faq_ask_planning_ouderen', 'faq_ask_positieve_test_na_vaccin', 'faq_ask_prioritaire_gropen', 'faq_ask_privacy', 'faq_ask_probleem_registratie', 'faq_ask_problemen_uitnodiging', 'faq_ask_quarantaine', 'faq_ask_qvax_probleem', 'faq_ask_reproductiegetal', 'faq_ask_risicopatient', 'faq_ask_risicopatient_diabetes', 'faq_ask_risicopatient_hartvaat', 'faq_ask_risicopatient_immuunziekte', 
'faq_ask_risicopatient_kanker', 'faq_ask_risicopatient_luchtwegaandoening', 'faq_ask_smaakverlies', 'faq_ask_snel_ontwikkeld', 'faq_ask_sneller_aan_de_beurt', 'faq_ask_taxi', 'faq_ask_test_voor_vaccin', 'faq_ask_testen', 'faq_ask_tijd_tot_tweede_dosis', 'faq_ask_timing_andere_vaccins', 'faq_ask_trage_start', 'faq_ask_tweede_dosis_afspraak', 'faq_ask_tweede_dosis_vervroegen', 'faq_ask_twijfel_bijwerkingen', 'faq_ask_twijfel_effectiviteit', 'faq_ask_twijfel_inhoud', 'faq_ask_twijfel_ivm_vaccinatie', 'faq_ask_twijfel_noodzaak', 'faq_ask_twijfel_ontwikkeling', 'faq_ask_twijfel_praktisch', 'faq_ask_twijfel_vaccins_zelf', 'faq_ask_twijfel_vrijheid', 'faq_ask_uit_flacon', 'faq_ask_uitnodiging_afspraak_kwijt', 'faq_ask_uitnodiging_na_vaccinatie', 'faq_ask_vaccin_doorgeven', 'faq_ask_vaccin_immuunsysteem', 'faq_ask_vaccin_variant', 'faq_ask_vaccinatiecentrum', 'faq_ask_vaccine_covid_gehad', 'faq_ask_vaccine_covid_gehad_effect', 'faq_ask_vakantie', 'faq_ask_veelgestelde_vragen', 'faq_ask_vegan', 'faq_ask_verplicht', 'faq_ask_verschillen', 'faq_ask_vrijwillig_Janssen', 'faq_ask_vrijwilliger', 'faq_ask_waar_en_wanneer', 'faq_ask_waarom', 'faq_ask_waarom_niet_verplicht', 'faq_ask_waarom_ouderen_eerst', 'faq_ask_waarom_twee_prikken', 'faq_ask_waarom_twijfel', 'faq_ask_wanneer_algemene_bevolking', 'faq_ask_wanneer_iedereen_gevaccineerd', 'faq_ask_wat_is_corona', 'faq_ask_wat_is_rna', 'faq_ask_wat_is_vaccin', 'faq_ask_wat_na_vaccinatie', 'faq_ask_welk_vaccin_krijg_ik', 'faq_ask_welke_vaccin', 'faq_ask_wie_ben_ik', 'faq_ask_wie_doet_inenting', 'faq_ask_wie_is_risicopatient', 'faq_ask_wie_nu', 'faq_ask_wilsonbekwaam', 'faq_ask_zwanger', 'get_started', 'nlu_fallback', 'test'], names_file=None, id=None)", "text": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 11651 | | valid | 1267 |
6,028
[ [ -0.0330810546875, -0.033782958984375, 0.007415771484375, 0.00939178466796875, -0.0032806396484375, 0.002227783203125, 0.01302337646484375, -0.0164947509765625, 0.022064208984375, 0.00960540771484375, -0.03948974609375, -0.047882080078125, -0.02093505859375, ...
Khanoooo/autonlp-data-Corona
2021-08-08T19:59:14.000Z
[ "region:us" ]
Khanoooo
null
null
0
84
2022-03-02T23:29:22
It's all about Corona
21
[ [ -0.017120361328125, -0.0531005859375, 0.06939697265625, 0.0567626953125, 0.0135650634765625, 0.0537109375, 0.01253509521484375, -0.01422882080078125, 0.06829833984375, 0.0261077880859375, 0.004070281982421875, -0.025787353515625, -0.0070953369140625, -0.0158...
MickyMike/large_c_corpus
2021-09-26T21:20:15.000Z
[ "region:us" ]
MickyMike
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
NishinoTSK/leishmaniav1
2022-01-14T04:09:05.000Z
[ "region:us" ]
NishinoTSK
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
PlanTL-GOB-ES/pharmaconer
2022-11-18T12:06:36.000Z
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:expert-generated", "multilinguality:monolingual", "language:es", "license:cc-by-4.0", "biomedical", "clinical", "spanish", "region:us" ]
PlanTL-GOB-ES
PharmaCoNER: Pharmacological Substances, Compounds and Proteins Named Entity Recognition track This dataset is designed for the PharmaCoNER task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje (Plan TL). It is a manually classified collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an open access electronic library that gathers Spanish medical publications from SciELO (Scientific Electronic Library Online). The annotation of the entire set of entity mentions was carried out by medicinal chemistry experts and it includes the following 4 entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR. The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into 3 subsets. The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each. In terms of training examples, this translates to a total of 8074, 3764 and 3931 annotated sentences in each set. The original dataset was distributed in Brat format (https://brat.nlplab.org/standoff.html). For further information, please visit https://temu.bsc.es/pharmaconer/ or send an email to encargo-pln-life@bsc.es
@inproceedings{, title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track", author = "Gonzalez-Agirre, Aitor and Marimon, Montserrat and Intxaurrondo, Ander and Rabal, Obdulia and Villegas, Marta and Krallinger, Martin", booktitle = "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D19-5701", doi = "10.18653/v1/D19-5701", pages = "1--10", abstract = "", }
4
84
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language: - es tags: - biomedical - clinical - spanish multilinguality: - monolingual task_categories: - token-classification task_ids: - named-entity-recognition license: - cc-by-4.0 --- # PharmaCoNER ## Dataset Description Manually classified collection of Spanish clinical case studies. - **Homepage:** [zenodo](https://zenodo.org/record/4270158) - **Paper:** [PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track](https://aclanthology.org/D19-5701/) - **Point of Contact:** encargo-pln-life@bsc.es ### Dataset Summary Manually classified collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an open access electronic library that gathers Spanish medical publications from [SciELO](https://scielo.org/). The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into 3 subsets. The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each. In terms of training examples, this translates to a total of 8129, 3787 and 3952 annotated sentences in each set. The original dataset is distributed in [Brat](https://brat.nlplab.org/standoff.html) format. The annotation of the entire set of entity mentions was carried out by domain experts. It includes the following 4 entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR. This dataset was designed for the PharmaCoNER task, sponsored by [Plan-TL](https://plantl.mineco.gob.es/Paginas/index.aspx). For further information, please visit [the official website](https://temu.bsc.es/pharmaconer/). ### Supported Tasks Named Entity Recognition (NER) ### Languages - Spanish (es) ### Directory Structure * README.md * pharmaconer.py * dev-set_1.1.conll * test-set_1.1.conll * train-set_1.1.conll ## Dataset Structure ### Data Instances Three four-column files, one for each split. 
### Data Fields Every file has four columns: * 1st column: Word form or punctuation symbol * 2nd column: Original BRAT file name * 3rd column: Spans * 4th column: IOB tag #### Example <pre> La S0004-06142006000900008-1 123_125 O paciente S0004-06142006000900008-1 126_134 O tenía S0004-06142006000900008-1 135_140 O antecedentes S0004-06142006000900008-1 141_153 O de S0004-06142006000900008-1 154_156 O hipotiroidismo S0004-06142006000900008-1 157_171 O , S0004-06142006000900008-1 171_172 O hipertensión S0004-06142006000900008-1 173_185 O arterial S0004-06142006000900008-1 186_194 O en S0004-06142006000900008-1 195_197 O tratamiento S0004-06142006000900008-1 198_209 O habitual S0004-06142006000900008-1 210_218 O con S0004-06142006000900008-1 219-222 O atenolol S0004-06142006000900008-1 223_231 B-NORMALIZABLES y S0004-06142006000900008-1 232_233 O enalapril S0004-06142006000900008-1 234_243 B-NORMALIZABLES </pre> ### Data Splits | Split | Size | | ------------- | ------------- | | `train` | 8,129 | | `dev` | 3,787 | | `test` | 3,952 | ## Dataset Creation ### Curation Rationale For compatibility with similar datasets in other languages, we followed as close as possible existing curation guidelines. ### Source Data #### Initial Data Collection and Normalization Manually classified collection of clinical case report sections. The clinical cases were not restricted to a single medical discipline, covering a variety of medical disciplines, including oncology, urology, cardiology, pneumology or infectious diseases. This is key to cover a diverse set of chemicals and drugs. #### Who are the source language producers? Humans, there is no machine generated data. 
### Annotations #### Annotation process The annotation process of the PharmaCoNER corpus was inspired by previous annotation schemes and corpora used for the BioCreative CHEMDNER and GPRO tracks, translating the guidelines used for these tracks into Spanish and adapting them to the characteristics and needs of clinically oriented documents by modifying the annotation criteria and rules to cover medical information needs. This adaptation was carried out in collaboration with practicing physicians and medicinal chemistry experts. The adaptation, translation and refinement of the guidelines was done on a sample set of the SPACCC corpus and linked to an iterative process of annotation consistency analysis through interannotator agreement (IAA) studies until a high annotation quality in terms of IAA was reached. #### Who are the annotators? Practicing physicians and medicinal chemistry experts. ### Personal and Sensitive Information No personal or sensitive information included. ## Considerations for Using the Data ### Social Impact of Dataset This corpus contributes to the development of medical language models in Spanish. ### Discussion of Biases [N/A] ## Additional Information ### Dataset Curators Text Mining Unit (TeMU) at the Barcelona Supercomputing Center (bsc-temu@bsc.es). For further information, send an email to (plantl-gob-es@bsc.es). This work was funded by the [Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA)](https://avancedigital.mineco.gob.es/en-us/Paginas/index.aspx) within the framework of the [Plan-TL](https://plantl.mineco.gob.es/Paginas/index.aspx). ### Licensing information This work is licensed under [CC Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/) License. 
Copyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022) ### Citation Information ```bibtex @inproceedings{, title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track", author = "Gonzalez-Agirre, Aitor and Marimon, Montserrat and Intxaurrondo, Ander and Rabal, Obdulia and Villegas, Marta and Krallinger, Martin", booktitle = "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D19-5701", doi = "10.18653/v1/D19-5701", pages = "1--10", } ``` ### Contributions [N/A]
6,472
[ [ -0.0216827392578125, -0.032806396484375, 0.0401611328125, 0.0241546630859375, -0.0152587890625, -0.003520965576171875, -0.01934814453125, -0.03582763671875, 0.04571533203125, 0.04229736328125, -0.029632568359375, -0.0748291015625, -0.0635986328125, 0.0409545...
RohanAiLab/persian_daily_news
2022-10-21T16:13:09.000Z
[ "source_datasets:original", "language:fa", "region:us" ]
RohanAiLab
Persian Daily News dataset is a collection of 2 million news articles with the headline of each news article. This dataset contains news articles and their summaries for the last 10 years. This dataset is provided by Rohan AI lab for research purposes.
https://saied71.github.io/RohanAiLab/, author={Saied Alimoradi}, year={2021} }
0
84
2022-03-02T23:29:22
--- pretty_name: Persian Daily News language: - fa source_datasets: - original task_categories: - Summarization - sequence-modeling --- # Persian Daily News # Dataset Summary persian_daily_news is a collection of 2 million of unique news articles with the headline for each article. dataset can be used in abstractive summarization and paraphrasing tasks. This effort is part of a bigger perspective to have several datasets in Persian language(and other low resources languages) for different tasks that have two important factors: `free` and `easy-to-use`. Here is a quick HOW-TO for using this dataset in datasets library:[Demo-datasets](https://saied71.github.io/RohanAiLab/2021/09/03/Demo-datasets.html) # Description As discussed before, this dataset contains 2M news articles. Each article has these two attributes: text and summary. Here is a sample of dataset: ``` text: به گزارش گروه بین الملل ، خبرگزاری رسمی قطر اعلام کرد، بعد از امضای موافقتنامه همکاری نظامی بین قطر و روسیه این امکان فراهم شده است تا نظامیان قطری برای تکمیل آموزش‌های نظامی خود عازم روسیه شده و در آنجا تعلیم ببینند.در چارچوب این قرارداد که امروز یک شنبه توسط سرتیپ ستاد عبدالعزیز صالح السلیطی رییس هییت همکاری‌های بین المللی نظامی قطر و سرلشکر ویکتور جوریمیکین رییس اداره عمومی نیروی انسانی وزارت دفاع روسیه به امضا رسید، روابط نظامی بین دوحه و مسکو در زمینه موسسات آموزش‌های نظامی شاهد توسه قابل توجهی خواهد شد.به نوشته این خبرگزاری روابط قطر و روسیه در حال گسترش بوده و به سوی شکل‌گیری مشارکت راهبردی در تمامی زمینه‌ها پیش می‌رود. summary: از این پس نظامیان قطری برای آموزش عازم روسیه شده و در موسسات آموزش نظامی این کشور تعلیم خواهند دید. ``` # Citation ``` rohanailab@gmail.com title={persian_daily_news}, author={Saied Alimoradi}, year={2021} } ```
1,751
[ [ -0.03369140625, -0.05084228515625, 0.03350830078125, 0.0273895263671875, -0.043426513671875, -0.0104522705078125, -0.01171112060546875, -0.0196685791015625, 0.01485443115234375, 0.040283203125, -0.0274200439453125, -0.05438232421875, -0.042877197265625, 0.02...
RuudVelo/nl_corpora_parliament_processed
2022-01-27T13:47:05.000Z
[ "region:us" ]
RuudVelo
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Sam2021/Arguement_Mining_CL2017
2021-08-30T23:11:05.000Z
[ "region:us" ]
Sam2021
tokens along with chunk id. IOB1 format Begining of arguement denoted by B-ARG,inside arguement denoted by I-ARG, other chunks are O Orginial train,test split as used by the paper is provided
@article{stab2017parsing, title={Parsing argumentation structures in persuasive essays}, author={Stab, Christian and Gurevych, Iryna}, journal={Computational Linguistics}, volume={43}, number={3}, pages={619--659}, year={2017}, publisher={MIT Press One Rogers Street, Cambridge, MA 02142-1209, USA journals-info~…} }
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
SaulLu/Natural_Questions_HTML
2021-08-27T18:58:58.000Z
[ "region:us" ]
SaulLu
null
null
0
84
2022-03-02T23:29:22
This is a dataset extracted from the Natural Questions dataset This dataset is currently under development
107
[ [ -0.040435791015625, -0.0655517578125, -0.00865936279296875, -0.004634857177734375, -0.014007568359375, 0.0259246826171875, 0.00417327880859375, -0.035797119140625, 0.0239105224609375, 0.0909423828125, -0.0753173828125, -0.0017194747924804688, -0.0132217407226562...
SaulLu/Natural_Questions_HTML_reduced_all
2021-10-04T15:25:50.000Z
[ "region:us" ]
SaulLu
null
null
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
SebastianS/github-issues
2022-10-25T09:06:38.000Z
[ "task_categories:text-classification", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:unknown", "language:en-US", "region:us" ]
SebastianS
null
null
0
84
2022-03-02T23:29:22
--- annotations_creators: [] language_creators: - crowdsourced language: - en-US license: [] multilinguality: - monolingual pretty_name: github-issues size_categories: - unknown source_datasets: [] task_categories: - text-classification task_ids: [] --- # Dataset Card for GitHub Issues ## Dataset Description this was an example dataset made from the huggingface course
371
[ [ -0.023651123046875, -0.037506103515625, -0.0038204193115234375, -0.0005636215209960938, -0.01094818115234375, 0.0128936767578125, 0.0026798248291015625, -0.00202178955078125, 0.0543212890625, 0.0266571044921875, -0.0565185546875, -0.056610107421875, -0.014213562...
Shushant/nepali
2021-12-21T05:06:24.000Z
[ "region:us" ]
Shushant
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
Smiling/webnovels-en
2021-11-04T05:51:23.000Z
[ "region:us" ]
Smiling
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
SocialGrep/one-million-reddit-confessions
2022-07-01T18:48:52.000Z
[ "annotations_creators:lexyr", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "license:cc-by-4.0", "region:us" ]
SocialGrep
null
null
2
84
2022-03-02T23:29:22
--- annotations_creators: - lexyr language_creators: - crowdsourced language: - en license: - cc-by-4.0 multilinguality: - monolingual size_categories: - 1M<n<10M source_datasets: - original paperswithcode_id: null --- # Dataset Card for one-million-reddit-confessions ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://socialgrep.com/datasets](https://socialgrep.com/datasets?utm_source=huggingface&utm_medium=link&utm_campaign=onemillionconfessions) - **Point of Contact:** [Website](https://socialgrep.com/contact?utm_source=huggingface&utm_medium=link&utm_campaign=onemillionconfessions) ### Dataset Summary This corpus contains a million posts from the following subreddits: - /r/trueoffmychest - /r/confession - /r/confessions - /r/offmychest Posts are annotated with their score. ### Languages Mainly English. ## Dataset Structure ### Data Instances A data point is a Reddit post. ### Data Fields - 'type': the type of the data point. Can be 'post' or 'comment'. 
- 'id': the base-36 Reddit ID of the data point. Unique when combined with type. - 'subreddit.id': the base-36 Reddit ID of the data point's host subreddit. Unique. - 'subreddit.name': the human-readable name of the data point's host subreddit. - 'subreddit.nsfw': a boolean marking the data point's host subreddit as NSFW or not. - 'created_utc': a UTC timestamp for the data point. - 'permalink': a reference link to the data point on Reddit. - 'score': score of the data point on Reddit. - 'domain': the domain of the data point's link. - 'url': the destination of the data point's link, if any. - 'selftext': the self-text of the data point, if any. - 'title': the title of the post data point. ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information CC-BY v4.0 ### Contributions [Needs More Information]
3,518
[ [ -0.048065185546875, -0.05859375, 0.0223236083984375, 0.034088134765625, -0.03082275390625, 0.0013723373413085938, -0.015167236328125, -0.0194244384765625, 0.053680419921875, 0.04559326171875, -0.08203125, -0.07574462890625, -0.0545654296875, 0.02693176269531...
SuperAI2-Machima/Yord_ThaiQA_LST20
2022-02-25T06:31:36.000Z
[ "region:us" ]
SuperAI2-Machima
null
null
0
84
2022-03-02T23:29:22
พี่ยอด และน้อง ๆ ในทีมบ้านมัณิชมา ร่วมกันสร้างชุดข้อมูล คำถาม - คำตอบ จากชุดข้อมูล LST-20 โดยใช้ POS และ NER เพื่อมาสร้างชุดประโยคคำถาม ได้ข้อมูลคำถาม - ตอบ ทั้งหมดประมาณ 1,000 แถว
184
[ [ -0.01702880859375, -0.0301666259765625, -0.00995635986328125, 0.06756591796875, -0.07366943359375, -0.0207977294921875, 0.0001437664031982422, 0.009124755859375, 0.046173095703125, 0.04241943359375, -0.0186614990234375, -0.0272369384765625, -0.043212890625, ...
TRoboto/names
2022-01-29T16:33:25.000Z
[ "license:cc-by-4.0", "region:us" ]
TRoboto
List of Arabic first names with meaning and origin of most names
@software{Al-Fetyani_Maha_Processing_Library_2021, author = {Al-Fetyani, Mohammad}, month = {11}, title = {{Maha Processing Library}}, url = {https://github.com/TRoboto/Maha}, year = {2021} }
1
84
2022-03-02T23:29:22
--- project: Maha license: cc-by-4.0 --- ## Dataset Summary It includes list of Arabic names with meaning and origin of most names
141
[ [ -0.0264892578125, -0.0010585784912109375, -0.016387939453125, -0.0012979507446289062, -0.0296478271484375, 0.00760650634765625, 0.0137939453125, -0.01617431640625, 0.004253387451171875, 0.013427734375, -0.052093505859375, -0.07220458984375, -0.1041259765625, ...
Tevatron/wikipedia-squad
2021-09-23T02:25:31.000Z
[ "region:us" ]
Tevatron
null
@inproceedings{karpukhin-etal-2020-dense, title = "Dense Passage Retrieval for Open-Domain Question Answering", author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.550", doi = "10.18653/v1/2020.emnlp-main.550", pages = "6769--6781", }
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0214080810546875, -0.01497650146484375, 0.057098388671875, 0.028839111328125, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005046844482421875, 0.051361083984375, 0.016998291015625, -0.05206298828125, -0.01497650146484375, -0.06036376953125, 0...
Tevatron/wikipedia-wq-corpus
2021-09-23T01:42:47.000Z
[ "region:us" ]
Tevatron
null
@inproceedings{karpukhin-etal-2020-dense, title = "Dense Passage Retrieval for Open-Domain Question Answering", author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.550", doi = "10.18653/v1/2020.emnlp-main.550", pages = "6769--6781", }
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Tevatron/wikipedia-wq
2021-09-23T01:44:07.000Z
[ "region:us" ]
Tevatron
null
@inproceedings{karpukhin-etal-2020-dense, title = "Dense Passage Retrieval for Open-Domain Question Answering", author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.550", doi = "10.18653/v1/2020.emnlp-main.550", pages = "6769--6781", }
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
TurkuNLP/register_oscar
2023-09-25T11:30:50.000Z
[ "region:us" ]
TurkuNLP
null
null
5
84
2022-03-02T23:29:22
# Dataset Card for register_oscar ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) ### Dataset Summary The Register Oscar dataset is a multilingual dataset, containing languaegs from the Oscar dataset that have been tagged with register information. 8 main-level registers: * Narrative (NA) * Informational Description (IN) * Opinion (OP) * Interactive Discussion (ID) * How-to/Instruction (HI) * Informational Persuasion (IP) * Lyrical (LY) * Spoken (SP) For further description of the labels, see (Douglas Biber and Jesse Egbert. 2018. Register variation online) Code used to tag Register Oscar can be found at https://github.com/TurkuNLP/register-labeling ### Languages Currently contains the following languages: Arabic, Bengali, Catalan, English, Spanish, Basque, French, Hindi, Indonesian, Portuguese, Swahili, Urdu, Vietnamese and Chinese. 
For further information on the languages and data, see https://huggingface.co/datasets/oscar ## Dataset Structure ### Data Instances ``` {"id": "0", "labels": ["NA"], "text": "Zarif: Iran inajua mpango wa Saudia wa kufanya mauaji ya kigaidi dhidi ya maafisa wa ngazi za juu wa Iran\n"} ``` ### Data Fields * id: unique id of the document (from the Oscar dataset) * labels: the list of labels assigned to the text * text: the original text of the document (as appears in the Oscar dataset) ### Citing ``` @inproceedings{laippala-etal-2022-towards, title = "Towards better structured and less noisy Web data: Oscar with Register annotations", author = {Laippala, Veronika and Salmela, Anna and R{\"o}nnqvist, Samuel and Aji, Alham Fikri and Chang, Li-Hsin and Dhifallah, Asma and Goulart, Larissa and Kortelainen, Henna and P{\`a}mies, Marc and Prina Dutra, Deise and Skantsi, Valtteri and Sutawika, Lintang and Pyysalo, Sampo}, booktitle = "Proceedings of the Eighth Workshop on Noisy User-generated Text (W-NUT 2022)", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.wnut-1.23", pages = "215--221", abstract = {Web-crawled datasets are known to be noisy, as they feature a wide range of language use covering both user-generated and professionally edited content as well as noise originating from the crawling process. This article presents one solution to reduce this noise by using automatic register (genre) identification -whether the texts are, e.g., forum discussions, lyrical or how-to pages. We apply the multilingual register identification model by R{\"o}nnqvist et al. (2021) and label the widely used Oscar dataset. Additionally, we evaluate the model against eight new languages, showing that the performance is comparable to previous findings on a restricted set of languages. 
Finally, we present and apply a machine learning method for further cleaning text files originating from Web crawls from remains of boilerplate and other elements not belonging to the main text of the Web page. The register labeled and cleaned dataset covers 351 million documents in 14 languages and is available at https://huggingface.co/datasets/TurkuNLP/register{\_}oscar.}, } ```
3,538
[ [ -0.041290283203125, -0.01311492919921875, 0.00176239013671875, 0.010345458984375, -0.02093505859375, 0.0201263427734375, -0.02099609375, -0.050201416015625, 0.040740966796875, 0.017791748046875, -0.0211639404296875, -0.055908203125, -0.048126220703125, 0.023...
VJGamer/test
2021-10-24T18:06:25.000Z
[ "region:us" ]
VJGamer
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0214080810546875, -0.01497650146484375, 0.057098388671875, 0.028839111328125, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005046844482421875, 0.051361083984375, 0.016998291015625, -0.05206298828125, -0.01497650146484375, -0.06036376953125, 0...
WillFerreiraSantos/halos
2022-01-13T03:25:41.000Z
[ "region:us" ]
WillFerreiraSantos
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0214080810546875, -0.01497650146484375, 0.057098388671875, 0.028839111328125, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005046844482421875, 0.051361083984375, 0.016998291015625, -0.05206298828125, -0.01497650146484375, -0.06036376953125, 0...
Xenova/sponsorblock-768
2022-02-08T20:36:56.000Z
[ "region:us" ]
Xenova
null
null
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Xinghua/test
2021-10-28T07:47:33.000Z
[ "region:us" ]
Xinghua
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
Yatoro/github-issues
2021-11-19T01:26:49.000Z
[ "region:us" ]
Yatoro
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
abhishek/autonlp-data-imdb_eval
2021-04-28T14:46:54.000Z
[ "region:us" ]
abhishek
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
abhishek/autonlp-data-prodigy-10
2022-10-25T09:07:06.000Z
[ "language:en", "region:us" ]
abhishek
null
null
0
84
2022-03-02T23:29:22
--- language: - en --- # AutoNLP Dataset for project: prodigy-10 ## Table of content - [Dataset Description](#dataset-description) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) ## Dataset Descritpion This dataset has been automatically processed by AutoNLP for project prodigy-10. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "tags": [ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 ], "tokens": [ "tory", "backing", "for", "i", "d", "cards", "the", "tories", "are", "to", "back", "controversial", "government", "plans", "to", "introduce", "i", "d", "cards", ".", " ", "the", "shadow", "cabinet", "revealed", 
"its", "support", "ahead", "of", "next", "week", "s", "commons", "vote", "on", "a", "bill", "to", "introduce", "compulsory", "i", "d.", "the", "decision", "follows", "a", " ", "tough", "meeting", " ", "where", "some", "senior", "tories", "argued", "vociferously", "against", "the", "move", " ", "party", "sources", "told", "the", "bbc", ".", "the", "bill", " ", "which", "ministers", "claim", "will", "tackle", "crime", " ", "terrorism", "and", "illegal", "immigration", " ", "is", "expected", "to", "be", "opposed", "by", "the", "liberal", "democrats", ".", " ", "they", "have", "said", "the", "scheme", "is", " ", "deeply", "flawed", " ", "and", "a", "waste", "of", "money", ".", "sources", "within", "the", "conservative", "party", "told", "the", "bbc", "michael", "howard", "has", "always", "been", "in", "favour", "of", "i", "d", "cards", " ", "and", "tried", "to", "introduce", "them", "when", "he", "was", "home", "secretary", ".", "the", "party", "has", "been", " ", "agnostic", " ", "on", "the", "issue", "until", "now", "but", "had", "now", "decided", "to", "come", "off", "the", "fence", " ", "the", "tory", "source", "said", ".", "despite", "giving", "their", "backing", "to", "i", "d", "cards", " ", "the", "conservatives", "insisted", "they", "would", "hold", "ministers", "to", "account", "over", "the", "precise", "purpose", "of", "the", "scheme", ".", " ", "they", "said", "they", "would", "also", "press", "labour", "over", "whether", "objectives", "could", "be", "met", "and", "whether", "the", "home", "office", "would", "deliver", "them", ".", "and", "they", "pledged", "to", "assess", "the", "cost", "effectiveness", "of", "i", "d", "cards", "and", "whether", "people", "s", "privacy", "would", "be", "properly", "protected", ".", " ", "it", "is", "important", "to", "remember", "that", "this", "bill", "will", "take", "a", "decade", "to", "come", "into", "full", "effect", " ", "a", "spokesman", "said", ".", " ", "it", "will", "do", "nothing", "to", "solve", "the", 
"immediate", "problems", "of", "rising", "crime", "and", "uncontrolled", "immigration", ".", " ", "lib", "dem", "home", "affairs", "spokesman", "mark", "oaten", "said", ":", " ", "this", "has", "all", "the", "signs", "of", "michael", "howard", "overruling", "colleagues", " ", "concerns", "over", "i", "d", "cards", ".", " ", "the", "tories", "should", "have", "the", "courage", "to", "try", "and", "change", "public", "opinion", "not", "follow", "it", ".", " ", "the", "new", "chairman", "of", "the", "bar", "council", " ", "guy", "mansfield", "qc", "warned", "there", "was", "a", "real", "risk", "that", "people", "on", "the", " ", "margins", "of", "society", " ", "would", "be", "driven", "into", "the", "hands", "of", "extremists", ".", " ", "what", "is", "going", "to", "happen", "to", "young", "asian", "men", "when", "there", "has", "been", "a", "bomb", "gone", "off", "somewhere", " ", "they", "are", "going", "to", "be", "stopped", ".", "if", "they", "haven", "t", "[", "i", "d", "cards", "]", "they", "are", "going", "to", "be", "detained", "." 
] }, { "tags": [ 2, 6, 8, 8, 0, 8, 0, 8, 8, 8, 2, 6, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 0, 8, 2, 6, 8, 2, 6, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 2, 6, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 2, 6, 6, 8, 8, 8, 0, 8, 2, 6, 8, 8, 8, 8, 2, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 2, 8, 8, 8, 8, 8, 2, 6, 8, 8, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 2, 6, 8, 8, 2, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 2, 6, 8, 8, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 2, 6, 8, 8, 8, 8, 8, 8, 8, 2, 6, 8 ], "tokens": [ "o", "gara", "revels", "in", "ireland", "victory", "ireland", "fly", "-", "half", "ronan", "o", "gara", "hailed", "his", "side", "s", "19", "-", "13", "victory", "over", "england", "as", "a", " ", "special", " ", "win", ".", " ", "the", "munster", "number", "10", "kicked", "a", "total", "of", "14", "points", " ", "including", "two", "drop", "goals", " ", "to", "help", "keep", "alive", 
"their", "grand", "slam", "hopes", ".", "he", "told", "bbc", "sport", ":", " ", "we", "made", "hard", "work", "of", "it", "but", "it", "s", "still", "special", "to", "beat", "england", ".", " ", "i", "had", "three", "chances", "to", "win", "the", "game", "but", "didn", "t.", "we", "have", "work", "to", "do", "after", "this", "but", "we", "never", "take", "a", "victory", "over", "england", "lightly", ".", " ", "ireland", "hooker", "shane", "byrne", "echoed", "o", "gara", "s", "comments", "but", "admitted", "the", "game", "had", "been", "england", "s", "best", "outing", "in", "the", "six", "nations", ".", "byrne", "said", ":", " ", "it", "was", "a", "really", " ", "really", "hard", "game", "but", "from", "one", "to", "15", "in", "our", "team", "we", "worked", "really", " ", "really", "hard", ".", " ", "we", "just", "had", "to", "stick", "to", "our", "defensive", "pattern", " ", "trust", "ourselves", "and", "trust", "those", "around", "us", ".", "all", "round", "it", "was", "fantastic", ".", " ", "ireland", "captain", "brian", "o", "driscoll", " ", "who", "scored", "his", "side", "s", "only", "try", " ", "said", ":", " ", "we", "are", "delighted", " ", "we", "felt", "if", "we", "performed", "well", "then", "we", "would", "win", "but", "with", "england", "also", "having", "played", "very", "well", "it", "makes", "it", "all", "the", "sweeter", ".", " ", "we", "did", "get", "the", "bounce", "of", "the", "ball", "and", "some", "days", "that", "happens", "and", "you", "ve", "just", "got", "to", "jump", "on", "the", "back", "of", "it", ".", " ", "ireland", "coach", "eddie", "o", "sullivan", "was", "surprised", "that", "england", "coach", "andy", "robinson", "said", "he", "was", "certain", "mark", "cueto", "was", "onside", "for", "a", "disallowed", "try", "just", "before", "the", "break", ".", " ", "andy", "was", "sitting", "two", "yards", "from", "me", "and", "i", "couldn", "t", "see", "whether", "he", "was", "offside", "or", "not", "so", "i", "don", "t", "know", "how", 
"andy", "could", "have", "known", " ", "said", "o", "sullivan", ".", " ", "what", "i", "do", "know", "is", "that", "england", "played", "well", "and", "when", "that", "happens", "it", "makes", "a", "very", "good", "victory", "for", "us", ".", " ", "we", "had", "to", "defend", "for", "long", "periods", "and", "that", "is", "all", "good", "for", "the", "confidence", "of", "the", "team", ".", " ", "i", "think", "our", "try", "was", "very", "well", "worked", " ", "it", "was", "a", "gem", " ", "as", "good", "a", "try", "as", "we", "have", "scored", "for", "a", "while", ".", " ", "o", "sullivan", "also", "rejected", "robinson", "s", "contention", "england", "dominated", "the", "forward", "play", ".", " ", "i", "think", "we", "lost", "one", "lineout", "and", "they", "lost", "four", "or", "five", "so", "i", "don", "t", "know", "how", "that", "adds", "up", "to", "domination", " ", "he", "said", ".", "o", "driscoll", "also", "insisted", "ireland", "were", "happy", "to", "handle", "the", "pressure", "of", "being", "considered", "favourites", "to", "win", "the", "six", "nations", "title", ".", " ", "this", "season", "for", "the", "first", "time", "we", "have", "been", "able", "to", "play", "with", "the", "favourites", " ", "tag", " ", "he", "said", ".", " ", "hopefully", "we", "have", "proved", "that", "today", "and", "can", "continue", "to", "keep", "doing", "so", ".", " ", "as", "for", "my", "try", "it", "was", "a", "move", "we", "had", "worked", "on", "all", "week", ".", "there", "was", "a", "bit", "of", "magic", "from", "geordan", "murphy", "and", "it", "was", "a", "great", "break", "from", "denis", "hickie", "." 
] } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "tags": "Sequence(feature=ClassLabel(num_classes=9, names=['B-LOCATION', 'B-ORG', 'B-PERSON', 'B-PRODUCT', 'I-LOCATION', 'I-ORG', 'I-PERSON', 'I-PRODUCT', 'O'], names_file=None, id=None), length=-1, id=None)", "tokens": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 186 | | valid | 58 |
22,513
[ [ -0.041778564453125, -0.0165252685546875, 0.032073974609375, 0.0295562744140625, -0.0286712646484375, 0.027099609375, 0.00997161865234375, -0.01015472412109375, 0.053924560546875, 0.048248291015625, -0.039215087890625, -0.03753662109375, -0.0391845703125, 0.0...
abidlabs/crowdsourced-speech
2022-01-21T15:42:59.000Z
[ "region:us" ]
abidlabs
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
abidlabs/test-translation-dataset
2022-02-01T23:15:18.000Z
[ "region:us" ]
abidlabs
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
albertvillanova/datasets-tests-compression
2021-08-16T13:22:32.000Z
[ "region:us" ]
albertvillanova
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
alittleie/mis_238
2021-10-03T02:05:08.000Z
[ "region:us" ]
alittleie
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
alperbayram/HaberTweetlerininDuyguAnaliziVeSiniflandirma
2022-02-27T11:56:20.000Z
[ "region:us" ]
alperbayram
null
null
0
84
2022-03-02T23:29:22
# Dataset Card for [Haber Tweetlerinin Duygu Analizi Ve Siniflandirma] Github repo [repo](https://github.com/alperbayram/Duygu_Analizi_ve_Metin_Siniflandirma) ## Dataset Description Twitter verileri üzerinde Türkçe Bert modelleri kullanarak yapılan duygu analizi ve metin sınıflandırma işlemleri ve görselleştirilmesi. İşlemler Drive ve colab üzerinde gerçekleştirilmiştir. | Adımlar | Yaptıklarım | | ------------- | ------------- | | Nr.0|Tüm kütüphaneleri Not defterimize ekledik| | Nr.1|Tweetleri çektik| | Nr.2 |Tweetleri Temizledik | | Nr.3 |Tweetlerden kelime bulutu oluşturduk ve png olarak drive kaydettik | | Nr.4 |Duygu analizi için bert modellerini yükledik| | Nr.5| Duygu analizi yaptık ve tabloya ekledik | | Nr.6|Duygu analizi sonuçlarını gösterdik ve görselleştirdik| | Nr.7 |Metin sınıflandırma için bert modellerini yükledik| | Nr.8 |Metin sınıflandırma yaptık ve tabloya ekledik | | Nr.9|Metin sınıflandırma sonuçlarını gösterdik ve görselleştirdik| | Nr.10| Bütün işlemleri tek tablo olarak Drive'a kaydettik | ### Dataset Curators [alper bayram](https://github.com/alperbayram) ### Languages [TR]
1,142
[ [ -0.03424072265625, -0.036529541015625, 0.01165771484375, 0.0119781494140625, -0.0343017578125, -0.0156402587890625, -0.01456451416015625, -0.00727081298828125, 0.034820556640625, 0.041259765625, -0.056488037109375, -0.07073974609375, -0.03240966796875, -0.00...
alperbayram/TwitterDuygu
2021-11-19T21:00:07.000Z
[ "region:us" ]
alperbayram
null
null
0
84
2022-03-02T23:29:22
language: - tr # negatif 54% # pozitif 46%
45
[ [ -0.039093017578125, -0.04791259765625, 0.00778961181640625, 0.04754638671875, -0.04364013671875, 0.0004088878631591797, -0.004665374755859375, 0.0160369873046875, 0.04180908203125, 0.0158538818359375, -0.035400390625, -0.07232666015625, -0.053436279296875, 0...
anechaev/ru_med_history
2022-02-12T08:51:53.000Z
[ "region:us" ]
anechaev
null
null
1
84
2022-03-02T23:29:22
# Medical Histories Ru-ru Medical Histories from Russian medical textbooks. A text dataset with medical histories. All dates were masked into <DATE>.
152
[ [ 0.03424072265625, -0.041839599609375, 0.035430908203125, -0.006134033203125, -0.029510498046875, -0.01396942138671875, 0.0284576416015625, -0.0242919921875, 0.04791259765625, 0.0592041015625, -0.05853271484375, -0.049346923828125, -0.027099609375, -0.0099182...
benjaminbeilharz/empathetic_dialogues_for_lm
2022-02-23T15:49:12.000Z
[ "region:us" ]
benjaminbeilharz
null
null
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
bobbydylan/top2k
2022-02-01T02:59:04.000Z
[ "region:us" ]
bobbydylan
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213470458984375, -0.01496124267578125, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.0465087890625, 0.052490234375, 0.005046844482421875, 0.051361083984375, 0.0170135498046875, -0.05206298828125, -0.01494598388671875, -0.0604248046875, 0.03790...
brunodorneles/ner
2021-11-03T13:14:46.000Z
[ "region:us" ]
brunodorneles
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
csikasote/bemba_train_dev_sets_processed
2022-02-08T10:21:51.000Z
[ "region:us" ]
csikasote
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
csikasote/bemba_trainset_processed
2022-02-07T17:16:12.000Z
[ "region:us" ]
csikasote
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
cstrathe435/Task2Dial
2022-02-03T12:55:28.000Z
[ "region:us" ]
cstrathe435
null
null
0
84
2022-03-02T23:29:22
# Dataset Card for Task2Dial ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Acknowledgements] (#funding-information) ## Dataset Description - **Homepage:** [Needs More Information] - **Repository:** [Needs More Information] - **Paper:** https://aclanthology.org/2021.icnlsp-1.28/ - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary The Task2Dial dataset includes (1) a set of recipe documents with 353 individual dialogues; and (2) conversations between an IG and an IF, which are grounded in the associated recipe documents. Presents sample utterances from a dialogue along with the associated recipe. It demonstrates some important features of the dataset, such as mentioning entities not present in the recipe document; re-composition of the original text to focus on the important steps and the breakdown of the recipe into manageable and appropriate steps. Following recent efforts in the field to standardise NLG research, we have made the dataset freely available. 
### Supported Tasks and Leaderboards We demonstrate the task of implementing the Task2Dial in a conversational agent called chefbot in the following git repo: https://github.com/carlstrath/ChefBot ### Languages English ### Data Fields Dataset.1: Task2Dial main, 353 cooking recipes modelled on real conversations between an IF and IG. Dataset. 2: A list of alternative ingredients for every swappable ingredient in the Task2Dial dataset. Dataset. 3. A list of objects and utensils with explanations, comparisons, handling and common storage location information. ## Dataset Creation The proposed task considers the recipe-following scenario with an information giver (IG) and an information follower (IF), where the IG has access to the recipe and gives instructions to the IF. The IG might choose to omit irrelevant information, simplify the content of a recipe or provide it as is. The IF will either follow the task or ask for further information. The IG might have to rely on information outside the given document (i.e. commonsense) to enhance understanding and success of the task. In addition, the IG decides on how to present the recipe steps, i.e. split them into sub- steps or merge them together, often diverging from the original number of recipe steps. The task is regarded as successful when the IG has successfully followed/understood the recipe. Hence, other dialogue-focused metrics, such as the number of turns, are not appropriate here. Formally, Task2Dial can be defined as follows: Given a recipe 𝑅𝑖 from 𝑅 =𝑅1, 𝑅2, 𝑅3,..., 𝑅𝑛, an ontology or ontologies 𝑂𝑖 =𝑂11,𝑂2,...,𝑂𝑛 of cooking-related concepts, a history of the conversation ℎ, predict the response 𝑟 of the IG. ### Curation Rationale Text selection was dependent on the quality of the information provided in the existing recipes. Too little information and the transcription and interpretation of the text became diffused with missing or incorrect knowledge. 
Conversely, providing too much information in the text resulted in a lack of creativity and commonsense reasoning by the data curators. Thus, the goal of the curation was to identify text that contained all the relevant information to complete the cooking task (tools, ingredients, weights, timings, servings) but not in such detail that it subtracted from the creativity, commonsense and imagination of the annotators. ### Source Data #### Initial Data Collection and Normalization Three open-source and creative commons licensed cookery websites6 were identified for data extraction, which permits any use or non- commercial use of data for research purposes. As content submission to the cooking websites was unrestricted, data appropriateness was ratified by the ratings and reviews given to each recipe by the public, highly rated recipes with a positive feedback were given preference over recipes with low scores and poor reviews [38]. From this, a list of 353 recipes was compiled and divided amongst the annotators for the data collection. As mentioned earlier, annotators were asked to take on the roles of both IF and IG, rather than a multi-turn WoZ approach, to allow flexibility in the utterances. This approach allowed the annotators additional time to formulate detailed and concise responses. #### Who are the source language producers? Undergraduate RAs were recruited through email. The participants were paid an hourly rate based on a university pay scale which is above the living wage and corresponds to the real living wage, following ethical guidelines for responsible innovation. The annotation team was composed of two males and one female data curators, under the age of 25 of mixed ethnicity’s with experience in AI and computing. This minimised the gender bias that is frequently observed in crowdsourcing platforms. #### Annotation process Each annotator was provided with a detailed list of instructions, an example dialogue and an IF/IG template (see Appendix A). 
The annotators were asked to read both the example dialogue and the original recipe to understand the text, context, composition, translation and annotation. The instructions included information handling and storage of data, text formatting, metadata and examples of high-quality and poor dialogues. An administrator was on hand throughout the data collection to support and guide the annotators. This approach reduced the number of low-quality dialogues associated with large crowdsourcing platforms that are often discarded post evaluation, as demonstrated in the data collection of the Doc2Dial dataset. #### Who are the annotators? Research assistants (RAs) from the School of Computing were employed on temporary contracts to construct and format the dataset. After an initial meeting to discuss the job role and determine suitability, the RAs were asked to complete a paid trial, this was evaluated and further advice was given on how to write dialogues and format the data to ensure high quality. After the successful completion of the trial, the RAs were permitted to continue with the remainder of the data collection. To ensure the high quality of the dataset, samples of the dialogues were often reviewed and further feedback was provided. ### Personal and Sensitive Information An ethics request was submitted for review by the board of ethics at our university. No personal or other data that may be used to identify an individual was collected in this study. ## Considerations for Using the Data The Task2Dial dataset is currently only for the cooking domain, but using the methodologies provided other tasks can be modelled for example, furniture assembly and maintenance tasks. ### Social Impact of Dataset Our proposed task aims to motivate research for modern dialogue systems that address the following challenges. 
Firstly, modern dialogue systems should be flexible and allow for "off-script" scenarios in order to emulate real-world phenomena, such as the ones present in human-human communication. This will require new ways of encoding user intents and new approaches to dialogue management in general. Secondly, as dialogue systems find different domain applications, the complexity of the dialogues might increase as well as the reliance on domain knowledge that can be encoded in structured or unstructured ways, such as documents, databases etc. Many applications, might require access to different domain knowledge sources in a course of a dialogue, and in such context, selection might prove beneficial in choosing "what to say". ### Discussion of Biases Prior to data collection, we performed three pilot studies. In the first, two participants assumed the roles of IG and IF respectively, where the IG had access to a recipe and provided recipe instructions to the IF (who did not have access to the recipe) over the phone, recording the session and then transcribing it. Next, we repeated the process with text-based dialogue through an online platform following a similar setup, however, the interaction was solely chat-based. The final study used self-dialogue, with one member of the team writing entire dialogues assuming both the IF and IG roles. We found that self-dialogue results were proximal to the results of two-person studies. However, time and cost were higher for producing two-person dialogues, with the additional time needed for transcribing and correction, thus, we opted to use self-dialogue. ## Additional Information Video: https://www.youtube.com/watch?v=zISkwn95RXs&ab_channel=ICNLSPConference ### Dataset Curators The recipes are composed by people of a different races / ethnicity, nationalities, socioeconomic status, abilities, age, gender and language with significant variation in pronunciations, structure, language and grammar. 
This provided the annotators with unique linguistic content for each recipe to interpret the data and configure the text into an IF/IG format. To help preserve sociolinguistic patterns in speech, the data curators retained the underlying language when para- phrasing, to intercede social and regional dialects with their own interpretation of the data to enhance the lexical richness. ### Licensing Information CC ### Citation Information https://aclanthology.org/2021.icnlsp-1.28/ ### Acknowledgements The research is supported under the EPSRC projects CiViL (EP/T014598/1) and NLG for low-resource domains (EP/T024917/1).
10,356
[ [ -0.01065826416015625, -0.056549072265625, 0.0159912109375, 0.004917144775390625, -0.0034637451171875, -0.00838470458984375, -0.011077880859375, -0.03131103515625, 0.0082855224609375, 0.06524658203125, -0.057403564453125, -0.03662109375, -0.057464599609375, 0...
damlab/HIV_PI
2022-03-09T19:48:01.000Z
[ "license:mit", "region:us" ]
damlab
null
null
0
84
2022-03-02T23:29:22
--- license: mit --- # Dataset Description ## Dataset Summary This dataset was derived from the Stanford HIV Genotype-Phenotype database and contains 1,733 HIV protease sequences. A pproximately half of the sequences are resistant to at least one antiretroviral therapeutic (ART). Supported Tasks and Leaderboards: None Languages: English ## Dataset Structure ### Data Instances Each column represents the protein amino acid sequence of the HIV protease protein. The ID field indicates the Genbank reference ID for future cross-referencing. There are 1,733 total protease sequences. Data Fields: ID, sequence, fold, FPV, IDV, NFV, SQV Data Splits: None ## Dataset Creation Curation Rationale: This dataset was curated to train a model (HIV-BERT-PI) designed to predict whether an HIV protease sequence would result in resistance to certain antiretroviral (ART) drugs. Initial Data Collection and Normalization: Dataset was downloaded and curated on 12/21/2021. ## Considerations for Using the Data Social Impact of Dataset: Due to the tendency of HIV to mutate, drug resistance is a common issue when attempting to treat those infected with HIV. Protease inhibitors are a class of drugs that HIV is known to develop resistance via mutations. Thus, by providing a collection of protease sequences known to be resistant to one or more drugs, this dataset provides a significant collection of data that could be utilized to perform computational analysis of protease resistance mutations. Discussion of Biases: Due to the sampling nature of this database, it is predominantly composed of subtype B sequences from North America and Europe with only minor contributions of Subtype C, A, and D. Currently, there was no effort made to balance the performance across these classes. As such, one should consider refinement with additional sequences to perform well on non-B sequences. ## Additional Information: - Dataset Curators: Will Dampier - Citation Information: TBA
2,052
[ [ -0.0054779052734375, -0.03594970703125, 0.006870269775390625, 0.01039886474609375, 0.004772186279296875, -0.003376007080078125, 0.0280609130859375, -0.0101165771484375, 0.02667236328125, 0.04034423828125, -0.059844970703125, -0.0391845703125, -0.05712890625, ...
davanstrien/crowdsourced-keywords
2022-03-01T14:07:11.000Z
[ "region:us" ]
davanstrien
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
dvilasuero/ag_news_error_analysis
2021-12-29T17:23:31.000Z
[ "region:us" ]
dvilasuero
null
null
0
84
2022-03-02T23:29:22
This is a dataset created for testing purposes in the context of this tutorial: https://rubrix.readthedocs.io/en/master/tutorials/08-error_analysis_using_loss.html You can find more details on section 5. of the tutorial and the corresponding dataset with corrected labels at https://huggingface.co/datasets/Recognai/ag_news_corrected_labels
343
[ [ -0.017578125, -0.044036865234375, -0.00916290283203125, 0.01861572265625, 0.01169586181640625, 0.000044286251068115234, 0.0125885009765625, 0.019439697265625, 0.00350189208984375, 0.0264434814453125, -0.07470703125, -0.031158447265625, -0.031097412109375, 0....
ebrigham/multi_sentiment
2022-02-18T08:03:39.000Z
[ "region:us" ]
ebrigham
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
edbeeching/github-issues
2022-02-11T14:20:42.000Z
[ "region:us" ]
edbeeching
null
null
0
84
2022-03-02T23:29:22
annotations_creators: - other language_creators: - crowdsourced languages: - en-US licenses: - other-my-license multilinguality: - monolingual pretty_name: HuggingFace Github Issues size_categories: - unknown source_datasets: - original task_categories: - text-classification - text-retrieval task_ids: - multi-class-classification - multi-label-classification - document-retrieval
381
[ [ -0.035614013671875, -0.0290374755859375, 0.018768310546875, 0.040374755859375, -0.01470184326171875, 0.0305938720703125, -0.0163726806640625, -0.047607421875, 0.05755615234375, 0.047454833984375, -0.04156494140625, -0.03228759765625, -0.05859375, 0.039337158...
elonmuskceo/persistent-space-dataset
2022-09-03T13:31:38.000Z
[ "region:us" ]
elonmuskceo
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
erwanlc/cocktails_recipe
2022-10-25T09:17:00.000Z
[ "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:2M<n<3M", "language:en", "license:other", "region:us" ]
erwanlc
null
null
3
84
2022-03-02T23:29:22
--- annotations_creators: - machine-generated language_creators: - machine-generated language: - en license: - other multilinguality: - monolingual size_categories: - 2M<n<3M source_datasets: [] task_categories: [] task_ids: [] pretty_name: cocktails_recipe language_bcp47: - en - en-US --- # Dataset Card for cocktails_recipe ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Personal and Sensitive Information](#personal-and-sensitive-information) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains a list of cocktails and how to do them. ### Languages The language is english. ## Dataset Structure ### Data Fields - Title: name of the cocktail - Glass: type of glass to use - Garnish: garnish to use for the glass - Recipe: how to do the cocktail - Ingredients: ingredients required ### Data Splits Currently, there is no splits. ## Dataset Creation ### Source Data #### Initial Data Collection and Normalization The dataset was created by scraping the Diffords cocktail website. ### Personal and Sensitive Information It should not contain any personal or sensitive information. ### Contributions Thanks to [@github-erwanlc](https://github.com/erwanlc) for adding this dataset.
1,616
[ [ -0.0165252685546875, -0.03692626953125, 0.00394439697265625, 0.023406982421875, -0.035125732421875, 0.0208740234375, 0.01137542724609375, -0.00849151611328125, 0.06109619140625, 0.031890869140625, -0.055267333984375, -0.08160400390625, -0.033203125, -0.00277...
flax-community/conceptual-12m-multilingual-marian-es
2021-07-21T19:35:31.000Z
[ "region:us" ]
flax-community
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
flax-community/conceptual-12m-multilingual-marian
2021-07-20T19:16:40.000Z
[ "region:us" ]
flax-community
null
null
0
84
2022-03-02T23:29:22
This dataset is created from subset of [Conceptual Captions](https://ai.google.com/research/ConceptualCaptions/). The original dataset has 12M captions but this dataset has around 10M image, caption pairs in different languages with 2.5M unique images. This dataset has captions translated from English to Spanish, German, French using language specific English to [Marian](https://huggingface.co/Helsinki-NLP) models. Data distribution is following: `train_file_marian_final.tsv`: 10010625 captions (2502656 captions of English, German, Spanish, French each) <br /> `val_file_marian_final.tsv`: 110592 captions (27648 captions of English, German, Spanish, French each)
671
[ [ -0.0232696533203125, -0.0299530029296875, 0.026611328125, 0.0423583984375, -0.0438232421875, -0.00148773193359375, -0.01392364501953125, -0.0243377685546875, 0.0300445556640625, 0.046722412109375, -0.045196533203125, -0.046966552734375, -0.054840087890625, 0...
flax-community/conceptual-captions-12
2021-07-19T12:40:00.000Z
[ "region:us" ]
flax-community
null
null
1
84
2022-03-02T23:29:22
This file contains English captions from Conceptual 12M dataset by Google. Since we don't own the images, we have provided the link to images, name of downloaded file, and caption for that image in the TSV file. We would like to thank [Luke Melas](https://github.com/lukemelas) for helping us get the cleaned CC-12M data on our TPU-VMs.
339
[ [ -0.004119873046875, -0.0284881591796875, 0.03985595703125, 0.0110321044921875, -0.04888916015625, 0.03326416015625, -0.01519775390625, -0.02069091796875, 0.033050537109375, 0.072021484375, -0.06927490234375, -0.047210693359375, -0.03826904296875, 0.022994995...
flax-community/dummy-oscar-als-32
2021-07-12T13:23:59.000Z
[ "region:us" ]
flax-community
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
flax-community/german-common-voice-processed
2021-07-13T18:09:18.000Z
[ "region:us" ]
flax-community
null
null
1
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
flax-community/multilingual-vqa
2021-07-21T06:27:40.000Z
[ "region:us" ]
flax-community
null
null
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
fvillena/cantemist
2021-06-03T22:50:38.000Z
[ "region:us" ]
fvillena
\
\
0
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
jakeazcona/short-text-labeled-emotion-classification
2021-12-05T18:38:57.000Z
[ "region:us" ]
jakeazcona
null
null
3
84
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ai4bharat/IndicQuestionGeneration
2022-10-13T06:08:25.000Z
[ "annotations_creators:no-annotation", "language_creators:found", "multilinguality:multilingual", "size_categories:98K<n<98K", "source_datasets:we start with the SQuAD question answering dataset repurposed to serve as a question generation dataset. We translate this dataset into different Indic languages.", ...
ai4bharat
This is the Question Generation dataset released as part of IndicNLG Suite. Each example has five fields: id, squad_id, answer, context and question. We create this dataset in eleven languages including as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. This is a translated data. The examples in each language are exactly similar but in different languages. The number of examples in each language is 98,027.
@inproceedings{Kumar2022IndicNLGSM, title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages}, author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar}, year={2022}, url = "https://arxiv.org/abs/2203.05437" }
1
84
2022-03-10T09:59:41
--- annotations_creators: - no-annotation language_creators: - found language: - as - bn - gu - hi - kn - ml - mr - or - pa - ta - te license: - cc-by-nc-4.0 multilinguality: - multilingual pretty_name: IndicQuestionGeneration size_categories: - 98K<n<98K source_datasets: - we start with the SQuAD question answering dataset repurposed to serve as a question generation dataset. We translate this dataset into different Indic languages. task_categories: - conditional-text-generation task_ids: - conditional-text-generation-other-question-generation --- # Dataset Card for "IndicQuestionGeneration" ## Table of Contents - [Dataset Card Creation Guide](#dataset-card-creation-guide) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## 
Dataset Description - **Homepage:** https://indicnlp.ai4bharat.org/indicnlg-suite - **Paper:** [IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages](https://arxiv.org/abs/2203.05437) - **Point of Contact:** ### Dataset Summary IndicQuestionGeneration is the question generation dataset released as part of IndicNLG Suite. Each example has five fields: id, squad_id, answer, context and question. We create this dataset in eleven languages, including as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. This is translated data. The examples in each language are exactly similar but in different languages. The number of examples in each language is 98,027. ### Supported Tasks and Leaderboards **Tasks:** Question Generation **Leaderboards:** Currently there is no Leaderboard for this dataset. ### Languages - `Assamese (as)` - `Bengali (bn)` - `Gujarati (gu)` - `Kannada (kn)` - `Hindi (hi)` - `Malayalam (ml)` - `Marathi (mr)` - `Oriya (or)` - `Punjabi (pa)` - `Tamil (ta)` - `Telugu (te)` ## Dataset Structure ### Data Instances One random example from the `hi` dataset is given below in JSON format. ``` { "id": 8, "squad_id": "56be8e613aeaaa14008c90d3", "answer": "अमेरिकी फुटबॉल सम्मेलन", "context": "अमेरिकी फुटबॉल सम्मेलन (एएफसी) के चैंपियन डेनवर ब्रोंकोस ने नेशनल फुटबॉल कांफ्रेंस (एनएफसी) की चैंपियन कैरोलिना पैंथर्स को 24-10 से हराकर अपना तीसरा सुपर बाउल खिताब जीता।", "question": "एएफसी का मतलब क्या है?" } ``` ### Data Fields - `id (string)`: Unique identifier. - `squad_id (string)`: Unique identifier in Squad dataset. - `answer (strings)`: Answer as one of the two inputs. - `context (string)`: Context, the other input. - `question (string)`: Question, the output. ### Data Splits Here is the number of samples in each split for all the languages. 
Language | ISO 639-1 Code | Train | Dev | Test | ---------- | ---------- | ---------- | ---------- | ---------- | Assamese | as | 69,979 | 17,495 | 10,553 | Bengali | bn | 69,979 | 17,495 | 10,553 | Gujarati | gu | 69,979 | 17,495 | 10,553 | Hindi | hi | 69,979 | 17,495 | 10,553 | Kannada | kn | 69,979 | 17,495 | 10,553 | Malayalam | ml | 69,979 | 17,495 | 10,553 | Marathi | mr | 69,979 | 17,495 | 10,553 | Oriya | or | 69,979 | 17,495 | 10,553 | Punjabi | pa | 69,979 | 17,495 | 10,553 | Tamil | ta | 69,979 | 17,495 | 10,553 | Telugu | te | 69,979 | 17,495 | 10,553 | ## Dataset Creation ### Curation Rationale [Detailed in the paper](https://arxiv.org/abs/2203.05437) ### Source Data Squad Dataset(https://rajpurkar.github.io/SQuAD-explorer/) #### Initial Data Collection and Normalization [Detailed in the paper](https://arxiv.org/abs/2203.05437) #### Who are the source language producers? [Detailed in the paper](https://arxiv.org/abs/2203.05437) ### Annotations [More information needed] #### Annotation process [More information needed] #### Who are the annotators? [More information needed] ### Personal and Sensitive Information [More information needed] ## Considerations for Using the Data ### Social Impact of Dataset [More information needed] ### Discussion of Biases [More information needed] ### Other Known Limitations [More information needed] ## Additional Information ### Dataset Curators [More information needed] ### Licensing Information Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders. 
### Citation Information If you use any of the datasets, models or code modules, please cite the following paper: ``` @inproceedings{Kumar2022IndicNLGSM, title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages}, author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar}, year={2022}, url = "https://arxiv.org/abs/2203.05437", ``` ### Contributions [Detailed in the paper](https://arxiv.org/abs/2203.05437)
6,319
[ [ -0.037750244140625, -0.04351806640625, -0.0092620849609375, 0.0284576416015625, -0.01548004150390625, 0.0196533203125, -0.0372314453125, -0.031951904296875, 0.029388427734375, 0.018096923828125, -0.0643310546875, -0.055938720703125, -0.0386962890625, 0.03704...
hackathon-pln-es/Axolotl-Spanish-Nahuatl
2023-04-13T08:51:58.000Z
[ "task_categories:text2text-generation", "task_categories:translation", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:translation", "size_categories:unknown", "source_datasets:original", "language:es", "license:mpl-2.0", "conditional-text-generation...
hackathon-pln-es
null
null
8
84
2022-03-30T15:52:03
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - es license: - mpl-2.0 multilinguality: - translation size_categories: - unknown source_datasets: - original task_categories: - text2text-generation - translation task_ids: [] pretty_name: "Axolotl Spanish-Nahuatl parallel corpus , is a digital corpus that compiles\ \ several sources with parallel content in these two languages. \n\nA parallel corpus\ \ is a type of corpus that contains texts in a source language with their correspondent\ \ translation in one or more target languages. Gutierrez-Vasques, X., Sierra, G.,\ \ and Pompa, I. H. (2016). Axolotl: a web accessible parallel corpus for spanish-nahuatl.\ \ In Proceedings of the Ninth International Conference on Language Resources and\ \ Evaluation (LREC 2016), Portoro, Slovenia. European Language Resources Association\ \ (ELRA). Grupo de Ingenieria Linguistica (GIL, UNAM). Corpus paralelo español-nahuatl.\ \ http://www.corpus.unam.mx/axolotl." language_bcp47: - es-MX tags: - conditional-text-generation --- # Axolotl-Spanish-Nahuatl : Parallel corpus for Spanish-Nahuatl machine translation ## Table of Contents - [Dataset Card for [Axolotl-Spanish-Nahuatl]](#dataset-card-for-Axolotl-Spanish-Nahuatl) ## Dataset Description - **Source 1:** http://www.corpus.unam.mx/axolotl - **Source 2:** http://link.springer.com/article/10.1007/s10579-014-9287-y - **Repository:1** https://github.com/ElotlMX/py-elotl - **Repository:2** https://github.com/christos-c/bible-corpus/blob/master/bibles/Nahuatl-NT.xml - **Paper:** https://aclanthology.org/N15-2021.pdf ## Dataset Collection In order to get a good translator, we collected and cleaned two of the most complete Nahuatl-Spanish parallel corpora available. Those are Axolotl collected by an expert team at UNAM and Bible UEDIN Nahuatl Spanish crawled by Christos Christodoulopoulos and Mark Steedman from Bible Gateway site. 
After this, we ended with 12,207 samples from Axolotl due to misalignments and duplicated texts in Spanish in both original and nahuatl columns and 7,821 samples from Bible UEDIN for a total of 20028 utterances. ## Team members - Emilio Morales [(milmor)](https://huggingface.co/milmor) - Rodrigo Martínez Arzate [(rockdrigoma)](https://huggingface.co/rockdrigoma) - Luis Armando Mercado [(luisarmando)](https://huggingface.co/luisarmando) - Jacobo del Valle [(jjdv)](https://huggingface.co/jjdv) ## Applications - MODEL: Spanish Nahuatl Translation Task with a T5 model in ([t5-small-spanish-nahuatl](https://huggingface.co/hackathon-pln-es/t5-small-spanish-nahuatl)) - DEMO: Spanish Nahuatl Translation in ([Spanish-nahuatl](https://huggingface.co/spaces/hackathon-pln-es/Spanish-Nahuatl-Translation))
2,767
[ [ -0.03131103515625, -0.0253143310546875, 0.02325439453125, 0.0439453125, -0.03424072265625, 0.01438140869140625, -0.0124664306640625, -0.041717529296875, 0.0213470458984375, 0.0282745361328125, -0.04229736328125, -0.06756591796875, -0.042022705078125, 0.04592...
bond005/sberdevices_golos_100h_farfield
2022-10-27T04:23:04.000Z
[ "task_categories:automatic-speech-recognition", "task_categories:audio-classification", "annotations_creators:expert-generated", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100k", "source_datasets:extended", "language:...
bond005
null
null
0
84
2022-10-26T05:04:50
--- pretty_name: Golos annotations_creators: - expert-generated language_creators: - crowdsourced - expert-generated language: - ru license: - other multilinguality: - monolingual paperswithcode_id: golos size_categories: - 10K<n<100k source_datasets: - extended task_categories: - automatic-speech-recognition - audio-classification --- # Dataset Card for sberdevices_golos_100h_farfield ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Golos ASR corpus](https://www.openslr.org/114) - **Repository:** [Golos dataset](https://github.com/sberdevices/golos) - **Paper:** [Golos: Russian Dataset for Speech Research](https://arxiv.org/pdf/2106.10161.pdf) - **Leaderboard:** [The 🤗 Speech Bench](https://huggingface.co/spaces/huggingface/hf-speech-bench) - **Point of Contact:** [Nikolay Karpov](mailto:karpnv@gmail.com) ### Dataset Summary Sberdevices Golos is a corpus of approximately 1200 hours of 16kHz Russian speech from crowd (reading speech) and farfield (communication with smart 
devices) domains, prepared by SberDevices Team (Alexander Denisenko, Angelina Kovalenko, Fedor Minkin, and Nikolay Karpov). The data is derived from the crowd-sourcing platform, and has been manually annotated. Authors divide all dataset into train and test subsets. The training subset includes approximately 1000 hours. For experiments with a limited number of records, authors identified training subsets of shorter length: 100 hours, 10 hours, 1 hour, 10 minutes. This dataset is a simpler version of the above mentioned Golos: - it includes the farfield domain only (without any sound from the crowd domain); - validation split is built on the 10-hour training subset; - training split corresponds to the 100-hour training subset without sounds from the 10-hour training subset; - test split is a full original test split. ### Supported Tasks and Leaderboards - `automatic-speech-recognition`: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). The task has an active Hugging Face leaderboard which can be found at https://huggingface.co/spaces/huggingface/hf-speech-bench. The leaderboard ranks models uploaded to the Hub based on their WER. ### Languages The audio is in Russian. ## Dataset Structure ### Data Instances A typical data point comprises the audio data, usually called `audio` and its transcription, called `transcription`. Any additional information about the speaker and the passage which contains the transcription is not provided. 
``` {'audio': {'path': None, 'array': array([ 1.22070312e-04, 1.22070312e-04, 9.15527344e-05, ..., 6.10351562e-05, 6.10351562e-05, 3.05175781e-05]), dtype=float64), 'sampling_rate': 16000}, 'transcription': 'джой источники истории турции'} ``` ### Data Fields - audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - transcription: the transcription of the audio file. ### Data Splits This dataset is a simpler version of the original Golos: - it includes the farfield domain only (without any sound from the crowd domain); - validation split is built on the 10-hour training subset; - training split corresponds to the 100-hour training subset without sounds from the 10-hour training subset; - test split is a full original test split. | | Train | Validation | Test | | ----- | ------ | ---------- | ----- | | examples | 9570 | 933 | 1916 | | hours | 10.3h | 1.0h | 1.4h | ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process All recorded audio files were manually annotated on the crowd-sourcing platform. #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information The dataset consists of people who have donated their voice. You agree to not attempt to determine the identity of speakers in this dataset. 
## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators The dataset was initially created by Alexander Denisenko, Angelina Kovalenko, Fedor Minkin, and Nikolay Karpov. ### Licensing Information [Public license with attribution and conditions reserved](https://github.com/sberdevices/golos/blob/master/license/en_us.pdf) ### Citation Information ``` @misc{karpov2021golos, author = {Karpov, Nikolay and Denisenko, Alexander and Minkin, Fedor}, title = {Golos: Russian Dataset for Speech Research}, publisher = {arXiv}, year = {2021}, url = {https://arxiv.org/abs/2106.10161} } ``` ### Contributions Thanks to [@bond005](https://github.com/bond005) for adding this dataset.
6,637
[ [ -0.0199737548828125, -0.0296478271484375, -0.00490570068359375, 0.0187835693359375, -0.02276611328125, -0.00494384765625, -0.032318115234375, -0.0293731689453125, 0.031524658203125, 0.0252685546875, -0.055877685546875, -0.051116943359375, -0.04180908203125, ...
sjyhne/mapai_dataset
2022-12-16T12:16:55.000Z
[ "license:cc-by-4.0", "region:us" ]
sjyhne
null
null
0
84
2022-12-14T09:38:56
--- license: cc-by-4.0 dataset_info: features: - name: image dtype: image - name: lidar dtype: image - name: mask dtype: image - name: filename dtype: string splits: - name: validation num_bytes: 2192377881.0 num_examples: 1500 - name: task1_test num_bytes: 1893839365.136 num_examples: 1368 - name: task2_test num_bytes: 1372710117.0 num_examples: 978 - name: train num_bytes: 10403414735.0 num_examples: 7000 download_size: 7060470972 dataset_size: 15862342098.136 features: - name: image dtype: image - name: lidar dtype: image - name: mask dtype: image - name: filename dtype: string splits: - name: validation num_bytes: 2192349531 num_examples: 1500 - name: task1_test num_bytes: 1893803900.136 num_examples: 1368 - name: task2_test num_bytes: 1372684773 num_examples: 978 - name: train num_bytes: 10403282435 num_examples: 7000 download_size: 7060394132 dataset_size: 15862120639.136 --- # Dataset Card for the MapAI Dataset ## Dataset Description - **Repository:** [MapAI: Precision in Building Segmentation - Github Repository](https://github.com/Sjyhne/MapAI-Competition) - **Paper:** [MapAI: Precision in Building Segmentation](https://journals.uio.no/NMI/article/view/9849) - **Point of Contact:** [Sander Riisøen Jyhne](mailto:sander.jyhne@kartverket.no) - **Leaderboard:** [Papers with Code leaderboard for MapAI: Precision in Building Segmentation Dataset](https://paperswithcode.com/dataset/sander-jyhne) ### Dataset Summary The dataset comprises 7500 training images and 1500 validation images from Denmark. The test dataset is split into two tasks, where the first task (1368 images) is to segment the buildings only using aerial images. In contrast, the second task (978 images) allows using aerial images and lidar data. All data samples have a resolution of 500x500. The aerial images are RGB images, while the lidar data are rasterized. The ground truth masks have two classes, building, and background. 
All data derives from a production setting, which means that there will be buildings that are not present in the ground truth and vice versa. ### Supported Tasks and Leaderboards - 'segmentation' ### Data Fields - 'image': A 500x500x3 RGB orthophoto - 'lidar': A 500x500 rasterized LiDAR image - 'mask': A 500x500 ground truth mask where 1's are buildings and 0's are background - 'filename': An identifying filename for the data tile ### Data Splits The MapAI Dataset has four splits; _train_, _validation_, _task1_test_, _task2_test_. Below are the statistics for each split. | Dataset Split | Number of Instances in Split | | ------------- | ------------------------------------------- | | Train | 7 500 | | Validation | 1 500 | | Task1_test | 1 368 | | Task2_test | 978 | ### Social Impact of Dataset The purpose of the dataset is to help develop models for accurate segmentation of buildings, which will help downstream tasks such as 3-dimensional building construction. ### Citation Information ``` @article{Jyhne2022, author = {Sander Jyhne and Morten Goodwin and Per-Arne Andersen and Ivar Oveland and Alexander Salveson Nossum and Karianne Ormseth and Mathilde Ørstavik and Andrew C Flatman}, doi = {10.5617/NMI.9849}, issn = {2703-9196}, issue = {3}, journal = {Nordic Machine Intelligence}, keywords = {Aerial Images,Deep Learning,Image segmentation,machine learning,remote sensing,semantic segmentation}, month = {9}, pages = {1-3}, title = {MapAI: Precision in Building Segmentation}, volume = {2}, url = {https://journals.uio.no/NMI/article/view/9849}, year = {2022}, } ```
3,846
[ [ -0.036468505859375, -0.050567626953125, 0.005207061767578125, -0.002048492431640625, -0.020965576171875, -0.0194854736328125, -0.0031280517578125, -0.0322265625, -0.005481719970703125, 0.0129241943359375, -0.0155181884765625, -0.06475830078125, -0.04165649414062...
keremberke/clash-of-clans-object-detection
2023-01-29T12:38:03.000Z
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Gaming", "region:us" ]
keremberke
null
@misc{ clash-of-clans-vop4y_dataset, title = { Clash of Clans Dataset }, type = { Open Source Dataset }, author = { Find This Base }, howpublished = { \\url{ https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y } }, url = { https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { feb }, note = { visited on 2023-01-18 }, }
2
84
2022-12-30T05:14:59
--- task_categories: - object-detection tags: - roboflow - roboflow2huggingface - Gaming --- <div align="center"> <img width="640" alt="keremberke/clash-of-clans-object-detection" src="https://huggingface.co/datasets/keremberke/clash-of-clans-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['ad', 'airsweeper', 'bombtower', 'canon', 'clancastle', 'eagle', 'inferno', 'kingpad', 'mortar', 'queenpad', 'rcpad', 'scattershot', 'th13', 'wardenpad', 'wizztower', 'xbow'] ``` ### Number of Images ```json {'train': 88, 'test': 13, 'valid': 24} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/clash-of-clans-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y/dataset/5](https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y/dataset/5?ref=roboflow2huggingface?ref=roboflow2huggingface) ### Citation ``` @misc{ clash-of-clans-vop4y_dataset, title = { Clash of Clans Dataset }, type = { Open Source Dataset }, author = { Find This Base }, howpublished = { \\url{ https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y } }, url = { https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { feb }, note = { visited on 2023-01-18 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.ai on March 30, 2022 at 4:31 PM GMT It includes 125 images. CoC are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 1920x1920 (Fit (black edges)) No image augmentation techniques were applied.
1,987
[ [ -0.043853759765625, -0.0287322998046875, 0.0009722709655761719, 0.0005941390991210938, -0.017333984375, 0.01348876953125, 0.003452301025390625, -0.036468505859375, 0.031402587890625, 0.0163116455078125, -0.046142578125, -0.05859375, -0.045928955078125, 0.015...
keremberke/garbage-object-detection
2023-01-05T11:30:08.000Z
[ "task_categories:object-detection", "roboflow", "region:us" ]
keremberke
null
@misc{ garbage-classification-3_dataset, title = { GARBAGE CLASSIFICATION 3 Dataset }, type = { Open Source Dataset }, author = { Material Identification }, howpublished = { \\url{ https://universe.roboflow.com/material-identification/garbage-classification-3 } }, url = { https://universe.roboflow.com/material-identification/garbage-classification-3 }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { mar }, note = { visited on 2023-01-02 }, }
4
84
2023-01-01T09:38:12
--- task_categories: - object-detection tags: - roboflow --- ### Roboflow Dataset Page [https://universe.roboflow.com/material-identification/garbage-classification-3/dataset/2](https://universe.roboflow.com/material-identification/garbage-classification-3/dataset/2?ref=roboflow2huggingface) ### Dataset Labels ``` ['biodegradable', 'cardboard', 'glass', 'metal', 'paper', 'plastic'] ``` ### Citation ``` @misc{ garbage-classification-3_dataset, title = { GARBAGE CLASSIFICATION 3 Dataset }, type = { Open Source Dataset }, author = { Material Identification }, howpublished = { \\url{ https://universe.roboflow.com/material-identification/garbage-classification-3 } }, url = { https://universe.roboflow.com/material-identification/garbage-classification-3 }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { mar }, note = { visited on 2023-01-02 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on July 27, 2022 at 5:44 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 10464 images. GARBAGE-GARBAGE-CLASSIFICATION are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) The following augmentation was applied to create 1 versions of each source image: * 50% probability of horizontal flip * 50% probability of vertical flip * Equal probability of one of the following 90-degree rotations: none, clockwise, counter-clockwise, upside-down
1,899
[ [ -0.018280029296875, -0.01568603515625, 0.034912109375, -0.0005936622619628906, -0.02056884765625, -0.01116180419921875, 0.0183258056640625, -0.034881591796875, 0.0110931396484375, 0.03411865234375, -0.039947509765625, -0.057220458984375, -0.0276336669921875, ...
Multimodal-Fatima/OK-VQA_train
2023-03-23T22:30:06.000Z
[ "region:us" ]
Multimodal-Fatima
null
null
1
84
2023-03-07T14:44:07
--- dataset_info: features: - name: image dtype: image - name: question_type dtype: string - name: confidence dtype: int32 - name: answers sequence: string - name: answers_original list: - name: answer dtype: string - name: raw_answer dtype: string - name: answer_confidence dtype: string - name: answer_id dtype: int64 - name: id_image dtype: int64 - name: answer_type dtype: string - name: question_id dtype: int64 - name: question dtype: string - name: id dtype: int64 - name: clip_tags_ViT_L_14 sequence: string - name: clip_tags_LAION_ViT_H_14_2B sequence: string - name: blip_caption_beam_5 dtype: string - name: LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14 sequence: string - name: LLM_Description_gpt3_downstream_tasks_visual_genome_LAION-ViT-H-14-2B sequence: string - name: DETA_detections_deta_swin_large_o365_coco_classes list: - name: attribute dtype: string - name: box sequence: float32 - name: label dtype: string - name: location dtype: string - name: ratio dtype: float32 - name: size dtype: string - name: tag dtype: string - name: DETA_detections_deta_swin_large_o365_coco_classes_caption_module_random list: - name: attribute dtype: string - name: box sequence: float64 - name: captions_module sequence: string - name: captions_module_filter sequence: string - name: label dtype: string - name: location dtype: string - name: ratio dtype: float64 - name: size dtype: string - name: tag dtype: string splits: - name: train num_bytes: 1686555802.0 num_examples: 9009 download_size: 1572400067 dataset_size: 1686555802.0 --- # Dataset Card for "OK-VQA_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
2,045
[ [ -0.049224853515625, -0.00799560546875, 0.0205078125, -0.005279541015625, -0.00930023193359375, -0.01461029052734375, 0.0308685302734375, -0.005428314208984375, 0.043670654296875, 0.033782958984375, -0.057373046875, -0.032012939453125, -0.03228759765625, -0.0...
MU-NLPC/Calc-aqua_rat
2023-10-30T15:57:12.000Z
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "arxiv:2305.15017", "arxiv:1705.04146", "region:us" ]
MU-NLPC
null
null
2
84
2023-04-16T21:08:23
--- language: - en license: apache-2.0 size_categories: - 10K<n<100K task_categories: - question-answering pretty_name: AQuA-RAT with Calculator dataset_info: - config_name: default features: - name: id dtype: string - name: question dtype: string - name: chain dtype: string - name: result dtype: string - name: options struct: - name: A dtype: string - name: B dtype: string - name: C dtype: string - name: D dtype: string - name: E dtype: string - name: question_without_options dtype: string splits: - name: train num_bytes: 72917721 num_examples: 94760 - name: validation num_bytes: 212928 num_examples: 254 - name: test num_bytes: 206180 num_examples: 254 download_size: 42057527 dataset_size: 73336829 - config_name: original-splits features: - name: id dtype: string - name: question dtype: string - name: chain dtype: string - name: result dtype: string - name: options struct: - name: A dtype: string - name: B dtype: string - name: C dtype: string - name: D dtype: string - name: E dtype: string - name: question_without_options dtype: string splits: - name: train num_bytes: 74265737 num_examples: 97467 - name: validation num_bytes: 212928 num_examples: 254 - name: test num_bytes: 206180 num_examples: 254 download_size: 42873590 dataset_size: 74684845 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* - config_name: original-splits data_files: - split: train path: original-splits/train-* - split: validation path: original-splits/validation-* - split: test path: original-splits/test-* --- # Dataset Card for Calc-aqua_rat ## Summary This dataset is an instance of [AQuA-RAT](https://huggingface.co/datasets/aqua_rat) dataset extended with in-context calls of a sympy calculator. ## Supported Tasks The dataset is intended for training Chain-of-Thought reasoning models able to use external tools to enhance the factuality of their responses. 
This dataset presents in-context scenarios where models can outsource the computations in the reasoning chain to a calculator. ## Construction Process The dataset was constructed automatically by evaluating all candidate calls to a `sympy` library that were extracted from the originally annotated *rationale*s. The selection of candidates is pivoted by the matching of equals ('=') symbols in the chain, where the left-hand side of the equation is evaluated, and accepted as a correct gadget call, if the result occurs closely on the right-hand side. Therefore, the extraction of calculator calls may inhibit false negatives (where the calculator could have been used but was not), but not any known false positives. We also perform in-dataset and cross-dataset data-leak detection within the [Calc-X collection](https://huggingface.co/collections/MU-NLPC/calc-x-652fee9a6b838fd820055483). Specifically for AQuA-RAT, we removed a few percent of the train split that were near-duplicates with some of the test or validation examples. 
A full description of the extraction process can be found in the [corresponding parse script](https://github.com/prompteus/calc-x/blob/7799a7841940b15593d4667219424ee71c74327e/gadgets/aqua.py#L19), **If you find an issue in the dataset or in the fresh version of the parsing script, we'd be happy if you report it, or create a PR.** ## Data splits The dataset with the near-duplicates removed can be loaded in the default config using: ```python datasets.load_dataset("MU-NLPC/calc-aqua_rat") ``` If you want the unfiltered version, you can use: ```python datasets.load_dataset("MU-NLPC/calc-aqua_rat", "original-splits") ``` ## Attributes - **id**: an id of the example - **question**: A natural language definition of the problem to solve, including the options to choose from - **chain**: A natural language step-by-step solution with automatically inserted calculator calls and outputs of the sympy calculator - **result**: The correct option (one of A...E) - **options**: a dictionary with 5 possible options (A, B, C, D and E), among which one is correct - **question_without_options**: same as **question** but without the options inserted Attributes **id**, **question**, **chain**, and **result** are present in all datasets in [Calc-X collection](https://huggingface.co/collections/MU-NLPC/calc-x-652fee9a6b838fd820055483). ## Related work This dataset was created as a part of a larger effort in training models capable of using a calculator during inference, which we call Calcformers. 
- [**Calc-X collection**](https://huggingface.co/collections/MU-NLPC/calc-x-652fee9a6b838fd820055483) - datasets for training Calcformers - [**Calcformers collection**](https://huggingface.co/collections/MU-NLPC/calcformers-65367392badc497807b3caf5) - calculator-using models we trained and published on HF - [**Calc-X and Calcformers paper**](https://arxiv.org/abs/2305.15017) - [**Calc-X and Calcformers repo**](https://github.com/prompteus/calc-x) Here are links to the original dataset: - [**original AQuA-RAT dataset**](https://huggingface.co/datasets/aqua_rat) - [**original AQuA-RAT paper**](https://arxiv.org/pdf/1705.04146.pdf) - [**original AQuA-RAT repo**](https://github.com/google-deepmind/AQuA) ## License Apache-2.0, consistently with the original aqua-rat dataset. ## Cite If you use this dataset in research, please cite the original [AQuA-RAT paper](https://arxiv.org/pdf/1705.04146.pdf), and [Calc-X paper](https://arxiv.org/abs/2305.15017) as follows: ```bibtex @inproceedings{kadlcik-etal-2023-soft, title = "Calc-X and Calcformers: Empowering Arithmetical Chain-of-Thought through Interaction with Symbolic Systems", author = "Marek Kadlčík and Michal Štefánik and Ondřej Sotolář and Vlastimil Martinek", booktitle = "Proceedings of the The 2023 Conference on Empirical Methods in Natural Language Processing: Main track", month = dec, year = "2023", address = "Singapore, Singapore", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/2305.15017", } ```
6,365
[ [ -0.0270538330078125, -0.035736083984375, 0.0245819091796875, 0.0071563720703125, 0.0089111328125, 0.0027370452880859375, -0.011444091796875, -0.023834228515625, 0.0191497802734375, 0.03033447265625, -0.04638671875, -0.0212554931640625, -0.02880859375, 0.0261...
vietgpt/ARC-Challenge_en
2023-07-15T09:20:18.000Z
[ "language:en", "region:us" ]
vietgpt
null
null
2
84
2023-06-04T01:58:25
--- language: en dataset_info: features: - name: id dtype: string - name: question dtype: string - name: choices sequence: - name: text dtype: string - name: label dtype: string - name: answerKey dtype: string splits: - name: train num_bytes: 349760 num_examples: 1119 - name: test num_bytes: 375511 num_examples: 1172 - name: validation num_bytes: 96660 num_examples: 299 download_size: 449457 dataset_size: 821931 --- # Dataset Card for "ARC-Challenge_en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
672
[ [ -0.047149658203125, -0.006977081298828125, 0.00884246826171875, 0.018341064453125, -0.006359100341796875, 0.01271820068359375, 0.01580810546875, -0.0251312255859375, 0.05926513671875, 0.03863525390625, -0.07196044921875, -0.053192138671875, -0.036712646484375, ...
clarin-knext/dbpedia-pl
2023-06-07T08:12:53.000Z
[ "language:pl", "arxiv:2305.19840", "region:us" ]
clarin-knext
null
null
2
84
2023-06-06T22:28:09
--- language: - pl --- Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: konrad.wojtasik@pwr.edu.pl
201
[ [ -0.0153961181640625, -0.0628662109375, 0.035400390625, 0.0164031982421875, -0.022186279296875, -0.010406494140625, -0.0115814208984375, -0.034515380859375, -0.001323699951171875, 0.0286712646484375, -0.038330078125, -0.048126220703125, -0.02899169921875, -0....
dmayhem93/agieval-logiqa-en
2023-06-18T17:28:42.000Z
[ "license:cc-by-nc-sa-4.0", "arxiv:2304.06364", "region:us" ]
dmayhem93
null
null
0
84
2023-06-18T12:49:02
--- dataset_info: features: - name: query dtype: string - name: choices sequence: string - name: gold sequence: int64 splits: - name: test num_bytes: 852087 num_examples: 651 download_size: 420337 dataset_size: 852087 license: cc-by-nc-sa-4.0 --- # Dataset Card for "agieval-logiqa-en" Dataset taken from https://github.com/microsoft/AGIEval and processed as in that repo. Raw datset: https://github.com/lgw863/LogiQA-dataset [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) @misc{zhong2023agieval, title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models}, author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan}, year={2023}, eprint={2304.06364}, archivePrefix={arXiv}, primaryClass={cs.CL} } @inproceedings{Liu2020LogiQAAC, title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning}, author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang}, booktitle={International Joint Conference on Artificial Intelligence}, year={2020} }
1,268
[ [ -0.01262664794921875, -0.0260009765625, 0.008148193359375, 0.0007309913635253906, -0.0177459716796875, 0.00659942626953125, 0.0209197998046875, -0.03973388671875, 0.0072174072265625, 0.0248260498046875, -0.050445556640625, -0.042083740234375, -0.03045654296875, ...